Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_37xx.c | 12
-rw-r--r-- drivers/atm/solos-pci.c | 8
-rw-r--r-- drivers/clk/qcom/Kconfig | 1
-rw-r--r-- drivers/clk/rockchip/clk-rk3128.c | 24
-rw-r--r-- drivers/clk/rockchip/clk-rk3568.c | 1
-rw-r--r-- drivers/cxl/core/hdm.c | 5
-rw-r--r-- drivers/cxl/core/memdev.c | 27
-rw-r--r-- drivers/cxl/core/pci.c | 13
-rw-r--r-- drivers/cxl/core/pmu.c | 2
-rw-r--r-- drivers/cxl/core/port.c | 4
-rw-r--r-- drivers/cxl/core/region.c | 5
-rw-r--r-- drivers/dma/fsl-edma-common.c | 1
-rw-r--r-- drivers/dma/fsl-edma-main.c | 12
-rw-r--r-- drivers/dma/idxd/registers.h | 12
-rw-r--r-- drivers/dma/idxd/submit.c | 14
-rw-r--r-- drivers/dma/stm32-dma.c | 8
-rw-r--r-- drivers/dma/ti/k3-psil-am62.c | 12
-rw-r--r-- drivers/dma/ti/k3-psil-am62a.c | 12
-rw-r--r-- drivers/dpll/dpll_netlink.c | 13
-rw-r--r-- drivers/edac/versal_edac.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/loongarch-stub.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/loongarch.c | 6
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 4
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 8
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/skl_scaler.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_live_test.c | 9
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_live_test.h | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 14
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 18
-rw-r--r-- drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c | 2
-rw-r--r-- drivers/hid/hid-apple.c | 2
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-lenovo.c | 3
-rw-r--r-- drivers/hid/hid-quirks.c | 1
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-acpi.c | 5
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_eth_com.c | 3
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 53
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 27
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_fdir.h | 15
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 179
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 71
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 25
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 55
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 441
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 162
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 29
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 17
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 19
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 6
-rw-r--r-- drivers/net/team/team.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 7
-rw-r--r-- drivers/pci/controller/pci-loongson.c | 46
-rw-r--r-- drivers/pci/controller/vmd.c | 2
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 9
-rw-r--r-- drivers/pci/pcie/aspm.c | 65
-rw-r--r-- drivers/perf/arm-cmn.c | 2
-rw-r--r-- drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c | 2
-rw-r--r-- drivers/phy/sunplus/phy-sunplus-usb2.c | 2
-rw-r--r-- drivers/phy/ti/phy-gmii-sel.c | 5
-rw-r--r-- drivers/platform/x86/intel/vbtn.c | 19
-rw-r--r-- drivers/platform/x86/intel_ips.c | 30
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 6
-rw-r--r-- drivers/soundwire/intel_ace2x.c | 3
-rw-r--r-- drivers/soundwire/stream.c | 7
118 files changed, 1511 insertions, 761 deletions
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 4ccf1994b97a..d530384f8d60 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -53,10 +53,12 @@
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))
+
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
@@ -74,8 +76,12 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
- if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
+ if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
+ /* Writing 1s does not clear the interrupt status register */
vdev->wa.interrupt_clear_with_0 = true;
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
+ }
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
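
The workaround detection above replaces a device-ID/revision check with a runtime probe: write every status bit, and if the register still reads back as all ones it is not write-1-to-clear, so it must be cleared by writing 0. A minimal standalone sketch of that probe, assuming generic readl()/writel() accessors rather than the driver's REGB_* helpers:

	static bool irq_status_clears_with_zero(void __iomem *status_reg, u32 all_bits)
	{
		/* Latch every status bit, then read back. */
		writel(all_bits, status_reg);
		if (readl(status_reg) != all_bits)
			return false;	/* write-1-to-clear worked as usual */

		/* Bits stuck: this register is cleared by writing 0. */
		writel(0x0, status_reg);
		return true;
	}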
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 94fbc3abe60e..d3c30a28c410 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct sk_buff *skb;
unsigned int len;
- spin_lock(&card->cli_queue_lock);
+ spin_lock_bh(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
- spin_unlock(&card->cli_queue_lock);
+ spin_unlock_bh(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
struct pkt_hdr *header;
/* Remove any yet-to-be-transmitted packets from the pending queue */
- spin_lock(&card->tx_queue_lock);
+ spin_lock_bh(&card->tx_queue_lock);
skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
if (SKB_CB(skb)->vcc == vcc) {
skb_unlink(skb, &card->tx_queue[port]);
solos_pop(vcc, skb);
}
}
- spin_unlock(&card->tx_queue_lock);
+ spin_unlock_bh(&card->tx_queue_lock);
skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
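
The spin_lock() to spin_lock_bh() conversions above matter because the same queues are also touched from the card's bottom-half context; taking the lock from process context without masking softirqs can deadlock if the BH fires on the same CPU while the lock is held. A minimal sketch of the pattern, with hypothetical names:

	static DEFINE_SPINLOCK(demo_queue_lock);	/* also taken by a tasklet */

	static void process_context_user(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		spin_lock_bh(&demo_queue_lock);	/* disables local softirqs too */
		skb = skb_dequeue(q);
		spin_unlock_bh(&demo_queue_lock);

		if (skb)
			dev_kfree_skb(skb);
	}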
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ad1acd9b7426..dbc3950c5960 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -767,6 +767,7 @@ config SM_CAMCC_8450
config SM_CAMCC_8550
tristate "SM8550 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8550
help
Support for the camera clock controller on SM8550 devices.
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index aa53797dbfc1..75071e0cd321 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480
PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" };
-PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
+PNAME(mux_clk_peri_src_p) = { "gpll", "cpll", "gpll_div2", "gpll_div3" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" };
PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
@@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 11, GFLAGS),
/* PD_PERI */
- GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
+ COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0,
+ RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
- RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
- COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 3, GFLAGS),
- COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
- GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+ GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0,
RK2928_CLKGATE_CON(2), 1, GFLAGS),
GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
@@ -316,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
@@ -490,7 +484,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
- GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 16dabe2b9c47..db713e1526cd 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 1cc9be85ba4c..7d97790b893d 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -363,10 +363,9 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- down_read(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_dpa_rwsem);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
- up_read(&cxl_dpa_rwsem);
return base;
}
@@ -839,6 +838,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_DEVMEM;
+
+ guard(rwsem_write)(&cxl_region_rwsem);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index fc5c2b414793..2f43d368ba07 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -227,10 +227,16 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
rc = cxl_get_poison_by_memdev(cxlmd);
@@ -239,6 +245,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
rc = cxl_get_poison_by_endpoint(port);
}
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
@@ -324,10 +331,16 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;
@@ -355,6 +368,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
out:
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
@@ -372,10 +386,16 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;
@@ -412,6 +432,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
out:
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
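
All three poison paths now follow the same nested-acquisition shape: take cxl_region_rwsem before cxl_dpa_rwsem, unwind the outer lock if the inner interruptible acquisition fails, and release in reverse order. A generic sketch of that shape:

	static int with_both_read_locks(struct rw_semaphore *outer,
					struct rw_semaphore *inner)
	{
		int rc;

		rc = down_read_interruptible(outer);
		if (rc)
			return rc;

		rc = down_read_interruptible(inner);
		if (rc) {
			up_read(outer);		/* unwind on interruption */
			return rc;
		}

		/* ... critical section ... */

		up_read(inner);			/* release in reverse order */
		up_read(outer);
		return 0;
	}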
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index eff20e83d0a6..37e1652afbc7 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -620,7 +620,7 @@ void read_cdat_data(struct cxl_port *port)
struct pci_dev *pdev = NULL;
struct cxl_memdev *cxlmd;
size_t cdat_length;
- void *cdat_table;
+ void *cdat_table, *cdat_buf;
int rc;
if (is_cxl_memdev(uport)) {
@@ -651,16 +651,15 @@ void read_cdat_data(struct cxl_port *port)
return;
}
- cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
- GFP_KERNEL);
- if (!cdat_table)
+ cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
+ if (!cdat_buf)
return;
- rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
+ rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
if (rc)
goto err;
- cdat_table = cdat_table + sizeof(__le32);
+ cdat_table = cdat_buf + sizeof(__le32);
if (cdat_checksum(cdat_table, cdat_length))
goto err;
@@ -670,7 +669,7 @@ void read_cdat_data(struct cxl_port *port)
err:
/* Don't leave table data allocated on error */
- devm_kfree(dev, cdat_table);
+ devm_kfree(dev, cdat_buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
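
The cdat_buf/cdat_table split above fixes a classic devm pitfall: the only copy of the pointer returned by devm_kzalloc() was advanced past the header, so the error path handed devm_kfree() an address that was never allocated. A sketch of the safe shape, with illustrative names:

	static void read_table(struct device *dev, size_t length)
	{
		void *buf, *table;

		buf = devm_kzalloc(dev, length + sizeof(__le32), GFP_KERNEL);
		if (!buf)
			return;

		table = buf + sizeof(__le32);	/* derived view only, never freed */
		/* ... fill and validate table ... */

		devm_kfree(dev, buf);	/* free exactly what devm_kzalloc() returned */
	}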
diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
index 7684c843e5a5..5d8e06b0ba6e 100644
--- a/drivers/cxl/core/pmu.c
+++ b/drivers/cxl/core/pmu.c
@@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = {
static void remove_dev(void *dev)
{
- device_del(dev);
+ device_unregister(dev);
}
int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 38441634e4c6..b7c93bb18f6e 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -226,9 +226,9 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- u64 base = cxl_dpa_resource_start(cxled);
- return sysfs_emit(buf, "%#llx\n", base);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
+ return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 56e575c79bb4..3e817a6f94c6 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2467,10 +2467,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
struct cxl_poison_context ctx;
int rc = 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
ctx = (struct cxl_poison_context) {
.port = port
};
@@ -2480,7 +2476,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
&ctx);
- up_read(&cxl_region_rwsem);
return rc;
}
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 6a3abe5b1790..b53f46245c37 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
+ fsl_chan->srcid = 0;
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 4635e16d7705..238a69bd0d6f 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(link)) {
- dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
- PTR_ERR(link));
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to %d\n", i);
return -EINVAL;
}
@@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(!fsl_chan->idle)) {
@@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
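
The device_link_add() fix above is worth calling out: unlike many kernel APIs, device_link_add() reports failure by returning NULL rather than an ERR_PTR(), so an IS_ERR() check can never fire on it. The correct check, sketched with hypothetical device pointers:

	static int attach_pd_link(struct device *consumer, struct device *supplier)
	{
		struct device_link *link;

		link = device_link_add(consumer, supplier,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				       DL_FLAG_RPM_ACTIVE);
		if (!link)	/* NULL, not ERR_PTR(), signals failure */
			return -EINVAL;

		return 0;
	}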
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 7b54a3939ea1..315c004f58e4 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -440,12 +440,14 @@ union wqcfg {
/*
* This macro calculates the offset into the GRPCFG register
* idxd - struct idxd *
- * n - wq id
- * ofs - the index of the 32b dword for the config register
+ * n - group id
+ * ofs - the index of the 64b qword for the config register
*
- * The WQCFG register block is divided into groups per each wq. The n index
- * allows us to move to the register group that's for that particular wq.
- * Each register is 32bits. The ofs gives us the number of register to access.
+ * The GRPCFG register block is divided into three sub-registers, which
+ * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
+ * to the register block that contains the three sub-registers.
+ * Each register block is 64bits. And the ofs gives us the offset
+ * within the GRPWQCFG register to access.
*/
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
(n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
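
With the corrected comment, the macro's arithmetic reads naturally: jump to group n's GRPCFG block, then index 64-bit qwords within the GRPWQCFG sub-register. A worked example with purely illustrative values:

	/*
	 * Illustrative values: grpcfg_offset = 0x400, GRPCFG_SIZE = 0x40.
	 * The second qword (ofs = 1) of group 2's GRPWQCFG then lives at:
	 *
	 *	0x400 + 2 * 0x40 + sizeof(u64) * 1 = 0x488
	 */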
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index c01db23e3333..3f922518e3a5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -183,13 +183,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
portal = idxd_wq_portal_addr(wq);
/*
- * The wmb() flushes writes to coherent DMA data before
- * possibly triggering a DMA read. The wmb() is necessary
- * even on UP because the recipient is a device.
- */
- wmb();
-
- /*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
llist_add(&desc->llnode, &ie->pending_llist);
}
+ /*
+ * The wmb() flushes writes to coherent DMA data before
+ * possibly triggering a DMA read. The wmb() is necessary
+ * even on UP because the recipient is a device.
+ */
+ wmb();
+
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
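
Moving the wmb() after the pending-list insertion is the point of the patch: every store the completion path depends on, not just the descriptor contents, must be globally visible before the MMIO submission that can trigger the device's DMA read. The required ordering, sketched as an excerpt (pending list name assumed):

	desc->hw->status = 0;			/* coherent DMA data */
	llist_add(&desc->llnode, &pending);	/* bookkeeping the IRQ handler reads */

	wmb();		/* order both stores before the doorbell */

	iosubmit_cmds512(portal, desc->hw, 1);	/* device may read immediately */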
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 72d83cd9ed6b..90857d08a1a7 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
enum dma_slave_buswidth max_width;
struct stm32_dma_desc *desc;
size_t xfer_count, offset;
- u32 num_sgs, best_burst, dma_burst, threshold;
- int i;
+ u32 num_sgs, best_burst, threshold;
+ int dma_burst, i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
@@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
threshold, max_width);
dma_burst = stm32_dma_get_burst(chan, best_burst);
+ if (dma_burst < 0) {
+ kfree(desc);
+ return NULL;
+ }
stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr =
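
Note the type change that accompanies the new error check: stm32_dma_get_burst() can return a negative errno, and storing it in the previously unsigned dma_burst would have made any "< 0" test always false. Keeping error-capable returns in a signed type is the general rule, sketched:

	int burst;	/* signed: may carry a negative errno */

	burst = stm32_dma_get_burst(chan, best_burst);
	if (burst < 0) {
		kfree(desc);	/* drop the partially built descriptor */
		return NULL;
	}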
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
index 2b6fd6e37c61..1272b1541f61 100644
--- a/drivers/dma/ti/k3-psil-am62.c
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
- PSIL_PDMA_XY_PKT(0x430c),
- PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
- PSIL_PDMA_XY_PKT(0xc30c),
- PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
index ca9d71f91422..4cf9123b0e93 100644
--- a/drivers/dma/ti/k3-psil-am62a.c
+++ b/drivers/dma/ti/k3-psil-am62a.c
@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
- PSIL_PDMA_XY_PKT(0x430c),
- PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
- PSIL_PDMA_XY_PKT(0xc30c),
- PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 442a0ebeb953..ce7cf736f020 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -925,7 +925,6 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[DPLL_A_PIN_MAX + 1];
- enum dpll_pin_state state;
u32 ppin_idx;
int ret;
@@ -936,10 +935,14 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
return -EINVAL;
}
ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
- state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
- ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
- if (ret)
- return ret;
+
+ if (tb[DPLL_A_PIN_STATE]) {
+ enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+ ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
+ if (ret)
+ return ret;
+ }
return 0;
}
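
The fix treats DPLL_A_PIN_STATE as what it is, an optional attribute inside the parent-pin nest: calling nla_get_u32() on an absent attribute dereferences a NULL tb[] slot. The general pattern, with the policy name assumed for illustration:

	struct nlattr *tb[DPLL_A_PIN_MAX + 1];
	int ret;

	ret = nla_parse_nested(tb, DPLL_A_PIN_MAX, parent_nest,
			       dpll_pin_nl_policy, extack);
	if (ret)
		return ret;

	if (tb[DPLL_A_PIN_STATE]) {	/* present? only then read it */
		enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
		/* ... apply state ... */
	}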
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 87e730dfefa0..8625de20fc71 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -966,10 +966,10 @@ static int mc_probe(struct platform_device *pdev)
edac_mc_id = emif_get_id(pdev->dev.of_node);
regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
- num_chans = FIELD_PREP(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
+ num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
num_chans++;
- num_csrows = FIELD_PREP(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
+ num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
num_csrows *= 2;
if (!num_csrows)
num_csrows = 1;
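
FIELD_PREP() and FIELD_GET() from <linux/bitfield.h> are mirror operations: the former shifts a value into a mask's position, the latter extracts and right-shifts it. Decoding a hardware config register therefore needs FIELD_GET(), as the fix shows. A tiny worked example with an illustrative mask:

	#include <linux/bitfield.h>

	#define NUM_CHANS_MASK	GENMASK(5, 4)	/* illustrative field */

	static u32 decode_num_chans(u32 regval)
	{
		/* e.g. regval = 0x20: bits 5:4 hold 0b10, so this returns 3 */
		return FIELD_GET(NUM_CHANS_MASK, regval) + 1;
	}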
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
index 72c71ae201f0..d6ec5d4b8dbe 100644
--- a/drivers/firmware/efi/libstub/loongarch-stub.c
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -35,9 +35,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
return status;
}
-unsigned long kernel_entry_address(void)
+unsigned long kernel_entry_address(unsigned long kernel_addr)
{
unsigned long base = (unsigned long)&kernel_offset - kernel_offset;
- return (unsigned long)&kernel_entry - base + VMLINUX_LOAD_ADDRESS;
+ return (unsigned long)&kernel_entry - base + kernel_addr;
}
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 807cba2693fc..0e0aa6cda73f 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -37,9 +37,9 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
return EFI_SUCCESS;
}
-unsigned long __weak kernel_entry_address(void)
+unsigned long __weak kernel_entry_address(unsigned long kernel_addr)
{
- return *(unsigned long *)(PHYSADDR(VMLINUX_LOAD_ADDRESS) + 8);
+ return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
}
efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
@@ -73,7 +73,7 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
- real_kernel_entry = (void *)kernel_entry_address();
+ real_kernel_entry = (void *)kernel_entry_address(kernel_addr);
real_kernel_entry(true, (unsigned long)cmdline_ptr,
(unsigned long)efi_system_table);
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 1bfdae34df39..da9b7b8d0716 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -307,17 +307,20 @@ static void setup_unaccepted_memory(void)
efi_err("Memory acceptance protocol failed\n");
}
+static efi_char16_t *efistub_fw_vendor(void)
+{
+ unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
+
+ return (efi_char16_t *)vendor;
+}
+
static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params)
{
- efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
- efi_table_attr(efi_system_table, fw_vendor);
-
- if (!memcmp(fw_vendor, apple, sizeof(apple))) {
- if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
- retrieve_apple_device_properties(boot_params);
- }
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
+ !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
+ retrieve_apple_device_properties(boot_params);
}
/*
@@ -765,11 +768,25 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
+ static const efi_char16_t ami[] = L"American Megatrends";
efi_get_seed(seed, sizeof(seed));
virt_addr += (range * seed[1]) >> 32;
virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
+
+ /*
+ * Older Dell systems with AMI UEFI firmware v2.0 may hang
+ * while decompressing the kernel if physical address
+ * randomization is enabled.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218173
+ */
+ if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
+ !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
+ efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+ seed[0] = 0;
+ }
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
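
The mitigation above disables only the physical half of KASLR: seed[0] feeds the randomized physical allocation, while seed[1] continues to randomize the virtual address. The vendor match itself is effectively an exact-string compare, since sizeof(ami) includes the UCS-2 NUL terminator; sketched:

	static const efi_char16_t ami[] = L"American Megatrends";

	/* sizeof(ami) covers the L'\0', so this is not a mere prefix match */
	if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
	    !memcmp(efistub_fw_vendor(), ami, sizeof(ami)))
		seed[0] = 0;	/* fall back to a non-random physical address */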
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1f64d8cbb14d..8dee52ce26d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4516,8 +4516,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_ras_suspend(adev);
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
-
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d79b4ca1ecfc..5ad03f2afdb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1343,6 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
abo = ttm_to_amdgpu_bo(bo);
+ WARN_ON(abo->vm_bo);
+
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a2287bb25223..a160265ddc07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+
+ entry->bo->vm_bo = NULL;
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- entry->bo->vm_bo = NULL;
spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 9df011323d4b..6ede85b28cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -155,13 +155,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
-
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
-
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -336,6 +329,14 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
+ /* doorbell programming is done for every playback */
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 45377a175250..8d5d86675a7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -813,12 +813,12 @@ static int sdma_v2_4_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
r = sdma_v2_4_init_microcode(adev);
if (r)
return r;
- adev->sdma.num_instances = SDMA_MAX_INSTANCE;
-
sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c240f741b5..0058f3f7cf6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1643,6 +1643,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+ * disallow GFXOFF in some cases leading to
+ * hangs in SDMA. Disallow GFXOFF while SDMA is active.
+ * We can probably just limit this to 5.2.3,
+ * but it shouldn't hurt for other parts since
+ * this GFXOFF will be disallowed anyway when SDMA is
+ * active, this just makes it explicit.
+ */
+ amdgpu_gfx_off_ctrl(adev, false);
+}
+
+static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+ * disallow GFXOFF in some cases leading to
+ * hangs in SDMA. Allow GFXOFF when SDMA is complete.
+ */
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
@@ -1690,6 +1716,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.test_ib = sdma_v5_2_ring_test_ib,
.insert_nop = sdma_v5_2_ring_insert_nop,
.pad_ib = sdma_v5_2_ring_pad_ib,
+ .begin_use = sdma_v5_2_ring_begin_use,
+ .end_use = sdma_v5_2_ring_end_use,
.emit_wreg = sdma_v5_2_ring_emit_wreg,
.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b452796fc6d3..c8c00c2a5224 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5182,6 +5182,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9649934ea186..e2a3aa8812df 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
bool mirror;
+ struct dc_stream_state *stream;
};
/* IPP related types */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 139cf31d2e45..89c3bf0fe0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
- hubp->cur_rect.x = src_x_offset + param->viewport.x;
- hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ param->rotation != ROTATION_ANGLE_0) {
+ hubp->cur_rect.x = 0;
+ hubp->cur_rect.y = 0;
+ hubp->cur_rect.w = param->stream->timing.h_addressable;
+ hubp->cur_rect.h = param->stream->timing.v_addressable;
+ } else {
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ }
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 39cf1ae3a3e1..f154a3eb1d1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 1,
@@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 2,
@@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 3,
@@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 4,
@@ -160,7 +160,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
},
.num_states = 5,
@@ -348,6 +348,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -360,6 +362,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 4d1336e5afc2..180f8a98a361 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index fa8fe5bf7e57..db06a5b749b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -423,8 +423,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- p->in_states->state_array[i].dtbclk_mhz =
- dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
+ if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0)
+ p->in_states->state_array[i].dtbclk_mhz =
+ dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 2b8b8366538e..cdb903116eb7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3417,7 +3417,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
- .mirror = pipe_ctx->plane_state->horizontal_mirror
+ .mirror = pipe_ctx->plane_state->horizontal_mirror,
+ .stream = pipe_ctx->stream,
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 996e4ee99023..e5cfaaef70b3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if > 5000, it might be wrong readback
- if (default_backlight > 5000000)
+ // if < 1 nits or > 5000, it might be wrong readback
+ if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
return edp_set_backlight_level_nits(link, true,
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index a522a7c02911..1675314a3ff2 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index ca2ece24e1e0..49028dde0f87 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2198,10 +2198,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded37..6899b3dc1f12 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -236,7 +236,7 @@ static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
if (file_priv->was_master &&
- rcu_access_pointer(file_priv->pid) == task_pid(current))
+ rcu_access_pointer(file_priv->pid) == task_tgid(current))
return 0;
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index df9bf3c9206e..cb90e70d85e8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
+ int ret, i, num_connectors = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name);
connector_set[i] = connector;
+ num_connectors++;
}
}
@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
+ set.num_connectors = num_connectors;
set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev))
@@ -892,7 +892,7 @@ out:
drm_framebuffer_put(fb);
if (connector_set) {
- for (i = 0; i < crtc_req->count_connectors; i++) {
+ for (i = 0; i < num_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}
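
The new num_connectors counter exists for the error path: the cleanup loop previously iterated the user-supplied count_connectors and could drm_connector_put() slots that were never filled after a mid-loop failure. Counting only what was actually acquired is the general unwind pattern; a sketch with hypothetical helpers:

	int i, acquired = 0;

	for (i = 0; i < requested; i++) {
		objs[i] = lookup_and_get_ref(ids[i]);	/* hypothetical lookup */
		if (!objs[i])
			break;			/* partial acquisition */
		acquired++;
	}

	/* ... on error or teardown ... */

	for (i = 0; i < acquired; i++)	/* never touch unfilled slots */
		put_ref(objs[i]);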
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 39db08f803ea..3b4065099872 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2309,7 +2309,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)
override = drm_edid_override_get(connector);
if (override) {
- num_modes = drm_edid_connector_update(connector, override);
+ if (drm_edid_connector_update(connector, override) == 0)
+ num_modes = drm_edid_connector_add_modes(connector);
drm_edid_free(override);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index dbc1b66c8ee4..1abfafbbfa75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
{
- u8 link_config[2];
+ u8 lane_count = crtc_state->lane_count;
- /* Write the link configuration data */
- link_config[0] = link_bw;
- link_config[1] = crtc_state->lane_count;
if (crtc_state->enhanced_framing)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (link_bw) {
+ /* DP and eDP v1.3 and earlier link bw set method. */
+ u8 link_config[] = { link_bw, lane_count };
- /* eDP 1.4 rate select method. */
- if (!link_bw)
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
- &rate_select, 1);
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
+ ARRAY_SIZE(link_config));
+ } else {
+ /*
+ * eDP v1.4 and later link rate set method.
+ *
+ * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
+ * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
+ *
+ * eDP v1.5 sinks allow choosing either, and the last choice
+ * shall be active.
+ */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
+ }
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 19b35ece31f1..646f367a13f5 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1374,7 +1374,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride_tiles;
- if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
+ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ src_stride_tiles < dst_stride_tiles)
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;
@@ -1501,8 +1502,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
size += remap_info->size;
} else {
- unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
- remap_info->width);
+ unsigned int dst_stride;
+
+ /*
+ * The hardware automagically calculates the CCS AUX surface
+ * stride from the main surface stride so can't really remap a
+ * smaller subset (unless we'd remap in whole AUX page units).
+ */
+ if (intel_fb_needs_pot_stride_remap(fb) &&
+ intel_fb_is_ccs_modifier(fb->base.modifier))
+ dst_stride = remap_info->src_stride;
+ else
+ dst_stride = remap_info->width;
+
+ dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 1e7c97243fcf..8a934bada624 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
- struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->uapi.state;
@@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ struct intel_plane_state *plane_state = NULL;
int *scaler_id;
const char *name;
int idx, ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index d5ed904f355d..6801f8b95c53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
- atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+ i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
ret = intel_gt_reset_engine(engine);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d37698bd6b91..17df71117cc7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -5001,7 +5001,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error,
+ e);
}
}
@@ -5013,7 +5014,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 9f5971f5e980..48f6c00402c4 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -16,6 +16,7 @@
#include "display/intel_display_device.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -232,7 +233,7 @@ struct i915_gpu_error {
atomic_t reset_count;
/** Number of times an engine has been reset */
- atomic_t reset_engine_count[I915_NUM_ENGINES];
+ atomic_t reset_engine_count[MAX_ENGINE_CLASS];
};
struct drm_i915_error_state_buf {
@@ -255,7 +256,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+ return atomic_read(&error->reset_engine_count[engine->class]);
+}
+
+static inline void
+i915_increase_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ atomic_inc(&error->reset_engine_count[engine->class]);
}
#define CORE_DUMP_FLAG_NONE 0x0
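The two helpers above boil down to an array of atomic counters indexed by hardware engine class instead of uapi class. A self-contained C11 sketch of the same bookkeeping, with an illustrative class count standing in for the driver's MAX_ENGINE_CLASS:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_ENGINE_CLASSES 5	/* illustrative stand-in for MAX_ENGINE_CLASS */

struct gpu_error_stats {
	atomic_uint reset_engine_count[NUM_ENGINE_CLASSES];
};

/* Count resets per hardware engine class, as the helpers above do. */
static void note_engine_reset(struct gpu_error_stats *e, unsigned int class)
{
	atomic_fetch_add(&e->reset_engine_count[class], 1);
}

static unsigned int engine_reset_count(struct gpu_error_stats *e,
				       unsigned int class)
{
	return atomic_load(&e->reset_engine_count[class]);
}

int main(void)
{
	static struct gpu_error_stats err;	/* zero-initialized */

	note_engine_reset(&err, 1);
	note_engine_reset(&err, 1);
	printf("class 1 resets: %u\n", engine_reset_count(&err, 1));
	return 0;
}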
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 4ddc6d902752..7d41874a49c5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
}
for_each_engine(engine, gt, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
+ t->reset_engine[i][id] =
+ i915_reset_engine_count(&i915->gpu_error,
+ engine);
}
t->reset_global = i915_reset_count(&i915->gpu_error);
@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
for_each_gt(gt, i915, i) {
for_each_engine(engine, gt, id) {
- if (t->reset_engine[id] ==
+ if (t->reset_engine[i][id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
t->func, t->name, engine->name,
i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
+ t->reset_engine[i][id]);
return -EIO;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index 36ed42736c52..83e3ad430922 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,6 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
+#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
@@ -17,7 +18,7 @@ struct igt_live_test {
const char *name;
unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
+ unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
};
/*
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index f81dc34c9c3e..c1bc8b00d938 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -203,7 +203,7 @@ void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
/* Disable RELAY mode to pass the processed image */
cfg_val &= ~GAMMA_RELAY_MODE;
- cfg_val = readl(gamma->regs + DISP_GAMMA_CFG);
+ writel(cfg_val, gamma->regs + DISP_GAMMA_CFG);
}
void mtk_gamma_config(struct device *dev, unsigned int w,
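The one-liner above is a classic read-modify-write bug: the masked value was overwritten by a second readl() and never written back, so relay mode was never actually disabled. A runnable sketch of the intended pattern, with a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

#define GAMMA_RELAY_MODE 0x1	/* illustrative bit position */

static uint32_t fake_reg = 0x3;	/* stands in for the MMIO register */

static uint32_t readl_sim(void)        { return fake_reg; }
static void     writel_sim(uint32_t v) { fake_reg = v; }

int main(void)
{
	/* read once, clear the bit, write the result back */
	uint32_t cfg = readl_sim();

	cfg &= ~GAMMA_RELAY_MODE;
	writel_sim(cfg);
	printf("reg = 0x%x\n", (unsigned)fake_reg);	/* 0x2: relay cleared */
	return 0;
}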
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c277b9fae950..db43f9dff912 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -788,6 +788,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ unsigned long flags;
if (mtk_crtc->event && mtk_crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
@@ -795,7 +796,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
if (mtk_crtc_state->base.event) {
mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
mtk_crtc->event = mtk_crtc_state->base.event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
mtk_crtc_state->base.event = NULL;
}
}
@@ -921,7 +926,14 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_crtc *mtk_crtc = NULL;
+
+ if (!crtc)
+ return NULL;
+
+ mtk_crtc = to_mtk_crtc(crtc);
+ if (!mtk_crtc)
+ return NULL;
return mtk_crtc->dma_dev;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2dfaa613276a..2b0c35cacbc6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -443,6 +443,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct mtk_drm_private *private = drm->dev_private;
struct mtk_drm_private *priv_n;
struct device *dma_dev = NULL;
+ struct drm_crtc *crtc;
int ret, i, j;
if (drm_firmware_drivers_only())
@@ -519,7 +520,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
/* Use OVL device for all DMA memory allocations */
- dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
+ crtc = drm_crtc_from_index(drm, 0);
+ if (crtc)
+ dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index e4279f1772a1..377d0e0cef84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -385,7 +385,7 @@ nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc)
/* Ensure an ior is hooked up to this outp already */
ior = outp->func->inherit(outp);
- if (!ior)
+ if (!ior || !ior->arm.head)
return -ENODEV;
/* With iors, there will be a separate output path for each type of connector - and all of
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 1b811d6972a1..201022ae9214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -49,14 +49,14 @@
#include <subdev/mmu.h>
struct gk20a_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
u32 *vaddr;
};
-#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
/*
* Used for objects allocated using the DMA API
@@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
list_del(&obj->vaddr_node);
vunmap(obj->base.vaddr);
obj->base.vaddr = NULL;
- imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
+ imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
imem->vaddr_max);
}
@@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct nvkm_vmm_map map = {
- .memory = &node->memory,
+ .memory = &node->base.memory,
.offset = offset,
.mem = node->mn,
};
@@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
return -ENOMEM;
*_node = &node->base;
- nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
@@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
*_node = &node->base;
node->dma_addrs = (void *)(node->pages + npages);
- nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -533,7 +533,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
else
ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
align, &node);
- *pmemory = node ? &node->memory : NULL;
+ *pmemory = node ? &node->base.memory : NULL;
if (ret)
return ret;
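The mechanical base.memory churn above works because container_of() accepts a nested member designator. A standalone sketch with trimmed-down stand-ins for the nvkm types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Trimmed-down stand-ins for the nvkm types */
struct nvkm_memory  { int placeholder; };
struct nvkm_instobj { struct nvkm_memory memory; };
struct gk20a_instobj { struct nvkm_instobj base; };

int main(void)
{
	struct gk20a_instobj obj;
	struct nvkm_memory *m = &obj.base.memory;

	/* the member argument can be a nested designator: base.memory */
	struct gk20a_instobj *back =
		container_of(m, struct gk20a_instobj, base.memory);

	printf("%s\n", back == &obj ? "recovered enclosing object" : "bug");
	return 0;
}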
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 6e3670508e3a..30919c872ac8 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -326,7 +326,7 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d9e9829b2200..b9c7c0ed7bcc 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -347,6 +347,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "Hailuck" },
{ "Jamesdonkey" },
{ "A3R" },
+ { "hfd.cn" },
+ { "WKB603" },
};
static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c6e4e0d1f214..72046039d1be 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -740,6 +740,7 @@
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE 0x8888
#define USB_VENDOR_ID_LAVIEW 0x22D4
#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 7c1b33be9d13..149a3c74346b 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -692,7 +692,8 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
* so set middlebutton_state to 3
* to never apply workaround anymore
*/
- if (cptkbd_data->middlebutton_state == 1 &&
+ if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
+ cptkbd_data->middlebutton_state == 1 &&
usage->type == EV_REL &&
(usage->code == REL_X || usage->code == REL_Y)) {
cptkbd_data->middlebutton_state = 3;
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index ea472923fab0..e0bbf0c6345d 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -121,6 +121,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index ac918a9ea8d3..1b49243adb16 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -40,6 +40,11 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
* ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
*/
{ "CHPN0001" },
+ /*
+ * The IDEA5002 ACPI device causes high interrupt usage and spurious
+ * wakeups from suspend.
+ */
+ { "IDEA5002" },
{ }
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 3d6f0a466a9e..f9f886289b97 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
* compare it to the stored version, just create the meta
*/
if (io_sq->disable_meta_caching) {
- if (unlikely(!ena_tx_ctx->meta_valid))
- return -EINVAL;
-
*have_meta = true;
return ena_com_create_meta(io_sq, ena_meta);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b5bca4814830..c44c44e26ddf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
int first_index, int count);
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
int rc = 0;
- rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
- adapter->xdp_num_queues);
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto setup_err;
- rc = ena_create_io_tx_queues_in_range(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto create_err;
return 0;
create_err:
- ena_free_all_io_tx_resources(adapter);
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
setup_err:
return rc;
}
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- /* sync this buffer for CPU use */
- dma_sync_single_for_cpu(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
- len,
- DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
dma_sync_single_for_device(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
- pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
-
/* If XDP isn't loaded try to reuse part of the RX buffer */
reuse_rx_buf_page = !is_xdp_loaded &&
ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
- dma_sync_single_for_cpu(rx_ring->dev,
- pre_reuse_paddr + pkt_offset,
- len,
- DMA_FROM_DEVICE);
-
if (!reuse_rx_buf_page)
ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
{
struct ena_rx_buffer *rx_info;
int ret;
+ /* XDP multi-buffer packets not supported */
+ if (unlikely(num_descs > 1)) {
+ netdev_err_once(rx_ring->adapter->netdev,
+ "xdp: dropped unsupported multi-buffer packets\n");
+ ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+ return ENA_XDP_DROP;
+ }
+
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
xdp_prepare_buff(xdp, page_address(rx_info->page),
rx_info->buf_offset,
rx_ring->ena_bufs[0].len, false);
- /* If for some reason we received a bigger packet than
- * we expect, then we simply drop it
- */
- if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
- return ENA_XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
+ u8 pkt_offset;
int rc = 0;
int i;
@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* First descriptor might have an offset set by the device */
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+ pkt_offset = ena_rx_ctx.pkt_offset;
+ rx_info->buf_offset += pkt_offset;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ dma_sync_single_for_cpu(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ rx_ring->ena_bufs[0].len,
+ DMA_FROM_DEVICE);
+
if (ena_xdp_present_ring(rx_ring))
- xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
/* allocate skb and fill it */
if (xdp_verdict == ENA_XDP_PASS)
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (xdp_verdict & ENA_XDP_FORWARDED) {
ena_unmap_rx_buff_attrs(rx_ring,
&rx_ring->rx_buffer_info[req_id],
- 0);
+ DMA_ATTR_SKIP_CPU_SYNC);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 694daeaf3e61..e1885c1eb100 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -938,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self)
return;
kfree(self->buff_ring);
+ self->buff_ring = NULL;
- if (self->dx_ring)
+ if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
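Clearing the pointers after freeing is what makes aq_ring_free() safe to call twice. The same defensive pattern in plain C:

#include <stdio.h>
#include <stdlib.h>

/* free() tolerates NULL, so clearing the pointer turns a second
 * teardown call into a harmless no-op instead of a double free. */
static void buf_free(void **buf)
{
	free(*buf);
	*buf = NULL;
}

int main(void)
{
	void *ring = malloc(64);

	buf_free(&ring);
	buf_free(&ring);	/* safe: pointer was cleared */
	printf("ring = %p\n", ring);
	return 0;
}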
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d0359b569afe..579eebb6fc56 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1748,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
+ skb_mark_for_recycle(skb);
+
if (skb->dev != bp->dev) {
/* this packet belongs to a vf-rep */
bnxt_vf_rep_rx(bp, skb);
return;
}
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_for_recycle(skb);
napi_gro_receive(&bnapi->napi, skb);
}
+static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
+ struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
+{
+ u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+
+ if (BNXT_PTP_RX_TS_VALID(flags))
+ goto ts_valid;
+ if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
+ return false;
+
+ts_valid:
+ *cmpl_ts = ts;
+ return true;
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1783,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb;
struct xdp_buff xdp;
u32 flags, misc;
+ u32 cmpl_ts;
void *data;
int rc = 0;
@@ -2005,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
+ if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -10731,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_mem(bp, irq_re_init);
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
- int rc = 0;
-
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
@@ -10749,15 +10762,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
+ int rc;
+
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
!bp->sriov_cfg,
BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ if (!rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
+ else if (rc < 0)
+ netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
}
#endif
__bnxt_close_nic(bp, irq_re_init, link_re_init);
- return rc;
}
static int bnxt_close(struct net_device *dev)
@@ -13940,6 +13956,8 @@ static int bnxt_resume(struct device *device)
if (rc)
goto resume_exit;
+ bnxt_clear_reservations(bp, true);
+
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
goto resume_exit;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index e702dbc3e6b1..a7d7b09ea162 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -161,7 +161,7 @@ struct rx_cmp {
#define RX_CMP_FLAGS_ERROR (1 << 6)
#define RX_CMP_FLAGS_PLACEMENT (7 << 7)
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
- #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
@@ -188,6 +188,12 @@ struct rx_cmp {
__le32 rx_cmp_rss_hash;
};
+#define BNXT_PTP_RX_TS_VALID(flags) \
+ (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
+
+#define BNXT_ALL_RX_TS_VALID(flags) \
+ !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
+
#define RX_CMP_HASH_VALID(rxcmp) \
((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
@@ -2375,7 +2381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
-int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
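bnxt_rx_ts_valid() collapses to a small predicate over the completion flags. A runnable sketch with illustrative bit positions (the real ones are the bnxt.h macros added above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions are illustrative; see the bnxt.h macros in this patch. */
#define ITYPES_MASK       0xf000u
#define ITYPE_PTP_W_TS    (12u << 12)
#define METADATA_PRESENT  (1u << 11)

static bool rx_ts_valid(bool all_rx_tstamp, uint32_t flags, uint32_t ts)
{
	if ((flags & ITYPES_MASK) == ITYPE_PTP_W_TS)
		return true;	/* PTP packet: the timestamp is always valid */
	/* otherwise the field holds a timestamp only when no metadata does */
	return all_rx_tstamp && ts && !(flags & METADATA_PRESENT);
}

int main(void)
{
	printf("%d\n", rx_ts_valid(false, ITYPE_PTP_W_TS, 12345));	/* 1 */
	printf("%d\n", rx_ts_valid(true, METADATA_PRESENT, 12345));	/* 0 */
	return 0;
}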
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f302dac56599..89809f1b129c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -449,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -ENODEV;
}
bnxt_ulp_stop(bp);
- if (netif_running(bp->dev)) {
- rc = bnxt_close_nic(bp, true, true);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to close");
- dev_close(bp->dev);
- rtnl_unlock();
- break;
- }
- }
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
rc = bnxt_hwrm_func_drv_unrgtr(bp);
if (rc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f3f384773ac0..5f67a7f94e7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -165,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
reset_coalesce:
if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
- rc = bnxt_close_nic(bp, true, false);
- if (!rc)
- rc = bnxt_open_nic(bp, true, false);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
} else {
rc = bnxt_hwrm_set_coal(bp);
}
@@ -972,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev,
* before PF unload
*/
}
- rc = bnxt_close_nic(bp, true, false);
- if (rc) {
- netdev_err(bp->dev, "Set channel failure rc :%x\n",
- rc);
- return rc;
- }
+ bnxt_close_nic(bp, true, false);
}
if (sh) {
@@ -4042,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
bnxt_ulp_stop(bp);
- rc = bnxt_close_nic(bp, true, false);
- if (rc) {
- etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
- return;
- }
+ bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f3886710e778..6e3da3362bd6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
- rc = bnxt_close_nic(bp, false, false);
- if (!rc)
- rc = bnxt_open_nic(bp, false, false);
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
} else {
bnxt_ptp_cfg_tstamp_filters(bp);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index 4798fb7fe35d..b6a534a3e0b1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
filter_block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
@@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
kfree(cmd_buff);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 97d3151076d5..e01a246124ac 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
-static struct notifier_block dpaa2_switch_port_switchdev_nb;
-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
-
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
@@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
goto err_egress_flood;
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb,
- false, extack);
+ NULL, NULL, false, extack);
if (err)
goto err_switchdev_offload;
@@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
- switchdev_bridge_port_unoffload(netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb);
+ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
}
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c3b7694a7485..e08c7b572497 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3731,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
-{
- struct vlan_ethhdr *vhdr;
- unsigned short vlan_TCI = 0;
-
- if (skb->protocol == htons(ETH_P_ALL)) {
- vhdr = (struct vlan_ethhdr *)(skb->data);
- vlan_TCI = ntohs(vhdr->h_vlan_TCI);
- }
-
- return vlan_TCI;
-}
-
static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u16 vlan_tag;
+ u16 vlan_tag = 0;
if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return netdev_pick_tx(ndev, skb, NULL);
- vlan_tag = fec_enet_get_raw_vlan_tci(skb);
- if (!vlan_tag)
+ /* VLAN is present in the payload. */
+ if (eth_type_vlan(skb->protocol)) {
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+ vlan_tag = ntohs(vhdr->h_vlan_TCI);
+ /* VLAN is present in the skb but not yet pushed into the payload. */
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb->vlan_tci;
+ } else {
return vlan_tag;
+ }
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
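The queue selection above leans on the 802.1Q TCI layout: the top three bits are the PCP priority, which is why both branches feed vlan_tag >> 13 into the priority-to-queue table. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: [15:13] PCP, [12] DEI, [11:0] VID */
static unsigned int vlan_pcp(uint16_t tci)
{
	return tci >> 13;
}

int main(void)
{
	uint16_t tci = (5u << 13) | 100u;	/* PCP 5, VID 100 */

	printf("PCP %u, VID %u\n", vlan_pcp(tci), tci & 0x0fffu);
	return 0;
}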
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e7ab89dc883a..63b45c61cc4a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -292,6 +292,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_FDIR_ENABLED BIT(21)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 19cbfe554689..dc499fe7734e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1061,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1203,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
unsigned int cnt = 0;
int val = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
@@ -1395,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
int count = 50;
int err;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
@@ -1436,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ if (adapter->link_up) {
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ } else {
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ }
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
-
+ if (adapter->link_up)
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
kfree(fltr);
@@ -1463,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1472,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ kfree(fltr);
+ adapter->fdir_active_fltr--;
+ fltr = NULL;
} else {
err = -EBUSY;
}
@@ -1780,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index 9eb9f73f6adf..d31bd923ba8c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -6,12 +6,25 @@
struct iavf_adapter;
-/* State of Flow Director filter */
+/* State of Flow Director filter
+ *
+ * *_REQUEST states mark a filter that still has to be sent to the PF
+ * driver to perform an action (either add or delete the filter).
+ * *_PENDING states indicate that the request was sent to the PF and the
+ * driver is waiting for its response.
+ *
+ * Both DELETE and DISABLE states are used to remove a filter in the PF.
+ * The difference is that after a successful response, a filter in
+ * DEL_PENDING state is deleted from the VF driver as well, while a filter
+ * in DIS_PENDING state is moved to the INACTIVE state.
+ */
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
+ IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
+ IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};
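Condensing the comment block above and the handlers later in this patch, the filter lifecycle reads roughly as follows (an editorial summary, not driver code):

/*
 *   ADD_REQUEST -> ADD_PENDING -> ACTIVE
 *   ACTIVE -> DEL_REQUEST -> DEL_PENDING -> filter freed
 *   ACTIVE -> DIS_REQUEST -> DIS_PENDING -> INACTIVE   (link went down)
 *   INACTIVE -> ADD_REQUEST                            (link back up)
 */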
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c862ebcd2e39..e8d5b889addc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -277,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
}
/**
- * iavf_lock_timeout - try to lock mutex but give up after timeout
- * @lock: mutex that should be locked
- * @msecs: timeout in msecs
- *
- * Returns 0 on success, negative on failure
- **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
-{
- unsigned int wait, delay = 10;
-
- for (wait = 0; wait < msecs; wait += delay) {
- if (mutex_trylock(lock))
- return 0;
-
- msleep(delay);
- }
-
- return -1;
-}
-
-/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
* @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
@@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
- struct iavf_fdir_fltr *fdir, *fdirtmp;
+ struct iavf_fdir_fltr *fdir;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
- list) {
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
- list_del(&fdir->list);
- kfree(fdir);
- adapter->fdir_active_fltr--;
- } else {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ /* Cancel a request, keep filter as inactive */
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Disable filters which are active or have a pending
+ * request to PF to be added
+ */
+ fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -4113,6 +4094,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
/**
+ * iavf_restore_fdir_filters
+ * @adapter: board private structure
+ *
+ * Restore existing FDIR filters when VF netdev comes back up.
+ **/
+static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(f, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ /* Cancel a request, keep filter as active */
+ f->state = IAVF_FDIR_FLTR_ACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
+ f->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Add filters which are inactive or have a pending
+ * request to PF to be deleted
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
* iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- /* Restore VLAN filters that were removed with IFF_DOWN */
+ /* Restore filters that were removed with IFF_DOWN */
iavf_restore_filters(adapter);
+ iavf_restore_fdir_filters(adapter);
iavf_configure(adapter);
@@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+/**
+ * iavf_disable_fdir - disable Flow Director and clear existing filters
+ * @adapter: board private structure
+ **/
+static void iavf_disable_fdir(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+ bool del_filters = false;
+
+ adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Delete filters not registered in PF */
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Filters registered in PF, schedule their deletion */
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ del_filters = true;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* A delete request was already sent to the PF; change the
+ * state to DEL_PENDING so the filter is freed after the PF's
+ * response rather than being marked INACTIVE
+ */
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (del_filters) {
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+}
+
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev,
((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
+ if (features & NETIF_F_NTUPLE)
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ else
+ iavf_disable_fdir(adapter);
+ }
+
return 0;
}
@@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
features = iavf_fix_netdev_vlan_features(adapter, features);
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ features &= ~NETIF_F_NTUPLE;
+
return iavf_fix_strip_features(adapter, features);
}
@@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (FDIR_FLTR_SUPPORT(adapter)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ }
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* Do not turn on offloads when they are requested to be turned off.
@@ -4826,34 +4894,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void iavf_shutdown(struct pci_dev *pdev)
-{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev))
- iavf_close(netdev);
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
- /* Prevent the watchdog from running. */
- iavf_change_state(adapter, __IAVF_REMOVE);
- adapter->aq_required = 0;
- mutex_unlock(&adapter->crit_lock);
-
-#ifdef CONFIG_PM
- pci_save_state(pdev);
-
-#endif
- pci_disable_device(pdev);
-}
-
-/**
* iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in iavf_pci_tbl
@@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
+ struct iavf_adapter *adapter;
struct net_device *netdev;
struct iavf_hw *hw;
- netdev = adapter->netdev;
+ /* Don't proceed with remove if netdev is already freed */
+ netdev = pci_get_drvdata(pdev);
+ if (!netdev)
+ return;
+
+ adapter = iavf_pdev_to_adapter(pdev);
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->wq);
+ pci_set_drvdata(pdev, NULL);
+
free_netdev(netdev);
pci_disable_device(pdev);
}
+/**
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void iavf_shutdown(struct pci_dev *pdev)
+{
+ iavf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 64c4443dbef9..2d9366be0ec5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
+ struct virtchnl_fdir_del f = {};
struct iavf_fdir_fltr *fdir;
- struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
@@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
- memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ process_fltr = true;
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
+ break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1902,6 +1907,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
}
/**
+ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
+ * @adapter: private adapter structure
+ *
+ * Called after a reset to re-add all FDIR filters and to delete those
+ * that were pending deletion.
+ */
+static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f, *ftmp;
+ bool add_filters = false;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ f->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* All filters and requests have been removed in PF,
+ * restore them
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ add_filters = true;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* Link down state, leave filters as inactive */
+ f->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ /* Delete filters that were pending to be deleted, the
+ * list on PF is already cleared after a reset
+ */
+ list_del(&f->list);
+ kfree(f);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (add_filters)
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
- if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
@@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ iavf_activate_fdir_filters(adapter);
+
iavf_parse_vf_resource_msg(adapter);
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
- if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
@@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 552970c7dec0..a9bdf3283a85 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1193,6 +1193,13 @@ int octep_device_setup(struct octep_device *oct)
if (ret)
return ret;
+ INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
@@ -1258,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev)
pci_read_config_byte(pdev, (pos + 8), &status);
dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
- return status;
+#define FW_STATUS_READY 1ULL
+ return status == FW_STATUS_READY;
}
return false;
}
@@ -1325,21 +1333,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
- octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
- &octep_dev->conf->fw_info);
+ err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ &octep_dev->conf->fw_info);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get firmware info\n");
+ goto register_dev_err;
+ }
dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
octep_dev->conf->fw_info.hb_interval,
octep_dev->conf->fw_info.hb_miss_count);
queue_delayed_work(octep_wq, &octep_dev->hb_task,
msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
- INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
- INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
- INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
- octep_dev->poll_non_ioq_intr = true;
- queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
- msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
-
netdev->netdev_ops = &octep_netdev_ops;
octep_set_ethtool_ops(netdev);
netif_carrier_off(netdev);
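The firmware ready fix above is the usual "status code is not a boolean" pattern; returning the raw byte had treated every nonzero code as ready. A minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_STATUS_READY 1	/* one value of a multi-valued status byte */

/* Treating the byte as a boolean would report any nonzero code as ready. */
static bool fw_ready(uint8_t status)
{
	return status == FW_STATUS_READY;
}

int main(void)
{
	printf("status 1 -> %d\n", fw_ready(1));	/* ready */
	printf("status 2 -> %d\n", fw_ready(2));	/* not ready */
	return 0;
}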
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index af21e2030cff..4728ba34b0e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable forward pause to driver */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
/* Enable channel mask for all LMACS */
if (is_dev_rpm2(rpm))
rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
@@ -616,12 +621,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
} else {
cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
}
if (tx_pause) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 62780d8b4f9a..21b5d71c1e37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
- goto err;
+ return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
@@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
return 0;
-err:
- rvu_nix_health_reporters_destroy(rvu_dl);
- return -ENOMEM;
}
static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index f65805860c8d..0bcf3e559280 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -671,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, ucast_idx, index;
struct nix_rx_action action = { 0 };
u64 relaxed_mask;
+ u8 flow_key_alg;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
@@ -701,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
}
+ flow_key_alg = action.flow_key_alg;
+
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
@@ -740,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
req.vf = pcifunc;
req.index = action.index;
req.match_id = action.match_id;
- req.flow_key_alg = action.flow_key_alg;
+ req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@@ -854,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
+ u8 flow_key_alg;
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
@@ -888,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
+ flow_key_alg = action.flow_key_alg;
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
@@ -924,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
req.vf = pcifunc | vf_func;
req.index = action.index;
req.match_id = action.match_id;
- req.flow_key_alg = action.flow_key_alg;
+ req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@@ -990,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_unlock(&mcam->lock);
}
+static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
+ struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
+ int alg_idx)
+
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int bank, op_rss;
+
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
+ return;
+
+ op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
+
+ bank = npc_get_bank(mcam, mcam_index);
+ mcam_index &= (mcam->banksize - 1);
+
+ /* If Rx action is MCAST update only RSS algorithm index */
+ if (!op_rss) {
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
+
+ action.flow_key_alg = alg_idx;
+ }
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@@ -1050,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
- is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
- rvu_write64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
- *(u64 *)&action);
- }
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+ /* If PF's allmulti entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
}
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
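The rvu_npc.c hunks above hinge on one ordering detail: once the RX action word is rewritten for the MCE list, the original flow_key_alg field is gone, so it must be latched first. A minimal user-space sketch of that save-before-overwrite pattern (the struct and values are illustrative stand-ins, not the RVU definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct rx_action {                  /* stand-in for struct nix_rx_action */
            uint8_t op;
            uint8_t flow_key_alg;
    };

    int main(void)
    {
            struct rx_action action = { .op = 0 /* UCAST */, .flow_key_alg = 5 };
            uint8_t flow_key_alg;

            flow_key_alg = action.flow_key_alg;     /* latch before overwrite */
            action = (struct rx_action){ .op = 1 /* MCAST */ };

            /* the install_flow request still sees the RSS algorithm index */
            printf("req.flow_key_alg = %u\n", (unsigned int)flow_key_alg);
            return 0;
    }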
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 0c17ebdda148..a57455aebff6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1650,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
+{
+ int vf;
+
+ /* The AF driver will determine whether to allow the VF netdev or not */
+ if (is_otx2_vf(pfvf->pcifunc))
+ return true;
+
+ /* check if there are any trusted VFs associated with the PF netdev */
+ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
+ if (pfvf->vf_configs[vf].trusted)
+ return true;
+ return false;
+}
+
static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
struct net_device *netdev = pf->netdev;
@@ -1682,7 +1697,8 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
+ if (otx2_promisc_use_mce_list(pf))
+ req->mode |= NIX_RX_MODE_USE_MCE;
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
@@ -2691,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
pf->vf_configs[vf].trusted = enable;
rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
- if (rc)
+ if (rc) {
pf->vf_configs[vf].trusted = !enable;
- else
+ } else {
netdev_info(pf->netdev, "VF %d is %strusted\n",
vf, enable ? "" : "not ");
+ otx2_set_rx_mode(netdev);
+ }
+
return rc;
}
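otx2_promisc_use_mce_list() reduces to an "any trusted VF" predicate over the VF config array. A self-contained sketch of the same test, with an illustrative type standing in for the otx2 structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct vf_config { bool trusted; };     /* illustrative, not the otx2 type */

    /* True when any VF is trusted, so promiscuous/allmulti traffic must
     * keep flowing through the MCE list rather than a plain unicast action. */
    static bool promisc_use_mce_list(const struct vf_config *cfg, size_t num_vfs)
    {
            for (size_t vf = 0; vf < num_vfs; vf++)
                    if (cfg[vf].trusted)
                            return true;
            return false;
    }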
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b2a5da9739d2..729a11b5fb25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -826,6 +826,7 @@ enum {
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_ACTIVE,
+ MLX5E_STATE_CHANNELS_ACTIVE,
};
struct mlx5e_modify_sq_param {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 4e923a2874ae..86bf007fd05b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5_flow_spec *spec;
int err;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
struct mlx5e_post_act_handle *handle;
int err;
+ if (IS_ERR(post_act))
+ return ERR_CAST(post_act);
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
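Both guards rely on the kernel's ERR_PTR convention: a handle may encode an errno rather than point at an object, so it must be tested before any dereference. A compilable user-space rendering of that convention (simplified from the kernel's err.h helpers):

    #include <errno.h>

    #define MAX_ERRNO 4095          /* same bound the kernel uses */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Shape of the added guards: test the handle before any use and
     * propagate the encoded errno (or re-encode it with ERR_CAST when
     * the function itself returns a pointer). */
    static int post_act_offload(void *post_act)
    {
            if (IS_ERR(post_act))
                    return (int)PTR_ERR(post_act);
            /* ... allocate the spec and install the rule ... */
            return 0;
    }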
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 655496598c68..161c5190c236 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
- sa_entry->esn_state.esn = esn;
+ if (sa_entry->esn_state.esn_msb)
+ sa_entry->esn_state.esn = esn;
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
@@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ switch (x->replay_esn->replay_window) {
+ case 32:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ case 64:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 128:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 256:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ default:
+ WARN_ON(true);
+ return;
+ }
}
attrs->dir = x->xso.dir;
@@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ if (ipsec->netevent_nb.notifier_call) {
unregister_netevent_notifier(&ipsec->netevent_nb);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ ipsec->netevent_nb.notifier_call = NULL;
+ }
+ if (ipsec->aso)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
@@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
}
}
+ if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-};
-
-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
- .xdo_dev_state_add = mlx5e_xfrm_add_state,
- .xdo_dev_state_delete = mlx5e_xfrm_del_state,
- .xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
- .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
@@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
- netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
- else
- netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
-
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
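The ESN clamp above is small but load-bearing: per RFC 4303, the first packet on an SA carries sequence number 1, so a fresh SA (esn_msb still zero) must never be installed with ESN 0. A standalone sketch of the clamp the hunk applies, assuming only that esn_msb == 0 identifies a fresh SA:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t initial_esn(uint32_t esn, uint32_t esn_msb)
    {
            if (esn_msb)
                    return esn;             /* upper half already advanced */
            return esn > 1 ? esn : 1;       /* max_t(u32, esn, 1) */
    }

    int main(void)
    {
            printf("%u\n", (unsigned int)initial_esn(0, 0));   /* 1, not 0 */
            printf("%u\n", (unsigned int)initial_esn(7, 0));   /* 7 */
            printf("%u\n", (unsigned int)initial_esn(0, 2));   /* 0 is fine here */
            return 0;
    }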
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8f4a37bceaf4..adaea3493193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
+struct mlx5e_ipsec_drop {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
+};
+
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
+ struct mlx5e_ipsec_drop replay;
+ struct mlx5e_ipsec_drop auth;
+ struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
@@ -201,19 +209,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
- struct xarray ipsec_obj_id_map;
-};
-
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@@ -248,6 +243,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f41c976dc33f..c1e89dc77db9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *drop_all_group;
+ struct mlx5e_ipsec_drop all;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drops.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+ mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+}
+
+static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ mlx5_del_flow_rules(rx->status.rule);
+
+ if (rx != ipsec->rx_esw)
+ return;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+#endif
+}
+
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_flow_handle *fte;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
@@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
- MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
- MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
- MLX5_SET(copy_action_in, action, src_offset, 0);
- MLX5_SET(copy_action_in, action, length, 7);
- MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 24);
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+ sa_entry->ipsec_rule.auth.fc = flow_counter;
- modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_err(mdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
- goto out_spec;
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
}
+ sa_entry->ipsec_rule.auth.rule = rule;
- /* create fte */
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt_2;
+ }
+ sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule_2;
+ }
+ sa_entry->ipsec_rule.trailer.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_rule_2:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ sa_entry->ipsec_rule.replay.rule = rule;
+ sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drops.drop_all_group = g;
+ rx->status_drops.all.rule = rule;
+ rx->status_drops.all.fc = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
- goto out;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
}
+ rx->status.rule = rule;
kvfree(spec);
- rx->status.rule = fte;
- rx->status.modify_hdr = modify_hdr;
return 0;
-out:
- mlx5_modify_header_dealloc(mdev, modify_hdr);
-out_spec:
+err_rule:
kvfree(spec);
return err;
}
+static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ ipsec_rx_status_pass_destroy(ipsec, rx);
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = ipsec_rx_status_drop_all_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
@@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- if (rx == ipsec->rx_esw) {
- mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
- } else {
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
- }
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- if (rx == ipsec->rx_esw)
- err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
- else
- err = ipsec_status_rule(mdev, rx, dest);
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}
-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
/* SPI number */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+ if (encap) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.inner_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.inner_esp_spi, spi);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.outer_esp_spi, spi);
+ }
}
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
+ u8 num_of_actions = 1;
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+ MLX5_SET(set_action_in, action[1], data, val);
+ MLX5_SET(set_action_in, action[1], offset, 0);
+ MLX5_SET(set_action_in, action[1], length, 32);
+
+ if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[2], action_type,
+ MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[2], field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+ MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], offset, 0);
+ MLX5_SET(set_action_in, action[2], length, 32);
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
- MLX5_SET(set_action_in, action, data, val);
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action[0], data, val);
+ MLX5_SET(set_action_in, action[0], offset, 0);
+ MLX5_SET(set_action_in, action[0], length, 32);
- modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
@@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
else
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- setup_fte_spi(spec, attrs->spi);
- setup_fte_esp(spec);
+ setup_fte_spi(spec, attrs->spi, attrs->encap);
+ if (!attrs->encap)
+ setup_fte_esp(spec);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ err = rx_add_rule_drop_replay(sa_entry, rx);
+ if (err)
+ goto err_add_replay;
+
+ err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+ if (err)
+ goto err_drop_reason;
+
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
+err_drop_reason:
+ if (sa_entry->ipsec_rule.replay.rule) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+ }
+err_add_replay:
+ mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
@@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- setup_fte_spi(spec, attrs->spi);
+ setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
@@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int err = 0;
- if (esw)
- down_write(&esw->mode_lock);
+ if (esw) {
+ err = mlx5_esw_lock(esw);
+ if (err)
+ return err;
+ }
if (mdev->num_block_ipsec) {
err = -EBUSY;
@@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
unlock:
if (esw)
- up_write(&esw->mode_lock);
+ mlx5_esw_unlock(esw);
return err;
}
@@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+ mlx5_del_flow_rules(ipsec_rule->auth.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+ if (ipsec_rule->replay.rule) {
+ mlx5_del_flow_rules(ipsec_rule->replay.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+ }
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
- xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+ xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
- xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
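Several of the new drop rules key on reg_c_2 = ipsec_obj_id | BIT(31). Reading the match values, the high bit appears to tag the register as carrying an IPsec object id while the low 31 bits hold the id itself; a quick standalone check of that encoding (the interpretation of BIT(31) is an inference from the hunk, not documented there):

    #include <stdio.h>

    int main(void)
    {
            unsigned int obj_id = 0x1234;
            unsigned int reg_c_2 = obj_id | (1u << 31);     /* id | BIT(31) */

            printf("tagged=0x%x id=0x%x\n", reg_c_2, reg_c_2 & ~(1u << 31));
            return 0;
    }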
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a91f772dc981..6e00afe4671b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -6,6 +6,8 @@
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
+#include "fs_core.h"
+#include "eswitch.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
- if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
+ is_mdev_legacy_mode(mdev)))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
- attrs->replay_esn.replay_window / 64);
+ attrs->replay_esn.replay_window);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
@@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
+ ipsec->aso = NULL;
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
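With the switch statement added in ipsec.c, attrs->replay_esn.replay_window now carries an enumerated size, which is why window_sz above is programmed with the value directly instead of replay_window / 64. A sketch of the mapping, with illustrative enum names standing in for MLX5_IPSEC_ASO_REPLAY_WIN_*BIT:

    #include <errno.h>

    enum replay_win { WIN_32BIT, WIN_64BIT, WIN_128BIT, WIN_256BIT };

    /* Mirror of the validation added in ipsec.c: only the four window
     * sizes the ASO supports are accepted; anything else is rejected
     * (the kernel WARNs) instead of being silently divided by 64. */
    static int replay_window_to_enum(unsigned int window, enum replay_win *out)
    {
            switch (window) {
            case 32:  *out = WIN_32BIT;  return 0;
            case 64:  *out = WIN_64BIT;  return 0;
            case 128: *out = WIN_128BIT; return 0;
            case 256: *out = WIN_256BIT; return 0;
            default:  return -EINVAL;
            }
    }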
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea58c6917433..0c87ddb8a7a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
+ ASSERT_RTNL();
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
@@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
+ set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
+static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
+{
+ WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
+ if (current_work() != &priv->tx_timeout_work)
+ cancel_work_sync(&priv->tx_timeout_work);
+}
+
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
+ clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+ mlx5e_cancel_tx_timeout_work(priv);
+
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
@@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- rtnl_lock();
- mutex_lock(&priv->state_lock);
+ /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
+ * through this flow. However, channel closing flows have to wait for
+ * this work to finish while holding rtnl lock too. So either get the
+ * lock or find that channels are being closed for another reason and
+ * this work is not relevant anymore.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+ return;
+ msleep(20);
+ }
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
@@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
}
unlock:
- mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3ab682bbcf86..1bf7540a65ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1497,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
rpriv->rep->vport);
- if (dl_port) {
+ if (!IS_ERR(dl_port)) {
SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
mlx5e_rep_vnic_reporter_create(priv, dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7ca9e5b86778..4809a66f3491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@@ -3739,6 +3742,20 @@ out_free:
}
static int
+set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+
+ return 0;
+}
+
+static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
struct mlx5_flow_attr **cond_attr,
@@ -3761,8 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ if (set_branch_dest_ft(flow->priv, attr))
+ goto out_err;
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
@@ -3771,8 +3788,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ if (set_branch_dest_ft(flow->priv, attr))
+ goto out_err;
break;
default:
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 095f31f380fa..190f10aba170 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
-}
-
-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-}
-
-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = rx->ft.status;
- struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_fc *flow_counter;
- struct mlx5_flow_spec *spec;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- int err = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!flow_group_in || !spec) {
- err = -ENOMEM;
- goto err_out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- g = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop flow group, err=%d\n", err);
- goto err_out;
- }
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
- goto err_cnt;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
-
- kvfree(flow_group_in);
- kvfree(spec);
- return 0;
-
-err_rule:
- mlx5_fc_destroy(mdev, flow_counter);
-err_cnt:
- mlx5_destroy_flow_group(g);
-err_out:
- kvfree(flow_group_in);
- kvfree(spec);
- return err;
-}
-
-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_2.ipsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.ipsec_syndrome, 0);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(ipsec->mdev,
- "Failed to add ipsec rx status pass rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status.rule = rule;
- kvfree(spec);
- return 0;
-
-err_rule:
- kvfree(spec);
- return err;
-}
-
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- esw_ipsec_rx_status_pass_destroy(ipsec, rx);
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-}
-
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- int err;
-
- err = esw_ipsec_rx_status_drop_create(ipsec, rx);
- if (err)
- return err;
-
- err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
- if (err)
- goto err_pass_create;
-
- return 0;
-
-err_pass_create:
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
- return err;
-}
-
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
@@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
- err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
- val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
@@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 0c90f7a8b0d3..ac9c65b89166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx);
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx) {}
-
-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- return -EINVAL;
-}
-
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8d0b915a3121..3047d7015c52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
- lockdep_assert_held(&esw->mode_lock);
+ devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
- up_write(&esw->mode_lock);
-
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
return;
devl_assert_locked(priv_to_devlink(esw->dev));
- down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
- goto unlock;
+ return;
esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs = 0;
else
esw->esw_funcs.num_ec_vfs = 0;
-
-unlock:
- up_write(&esw->mode_lock);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
esw->mode = MLX5_ESWITCH_LEGACY;
- up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
if (!mlx5_esw_allowed(esw))
return true;
- if (down_read_trylock(&esw->mode_lock) != 0)
+ if (down_read_trylock(&esw->mode_lock) != 0) {
+ if (esw->eswitch_operation_in_progress) {
+ up_read(&esw->mode_lock);
+ return false;
+ }
return true;
+ }
return false;
}
@@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
if (down_write_trylock(&esw->mode_lock) == 0)
return -EINVAL;
- if (atomic64_read(&esw->user_count) > 0) {
+ if (esw->eswitch_operation_in_progress ||
+ atomic64_read(&esw->user_count) > 0) {
up_write(&esw->mode_lock);
return -EBUSY;
}
@@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
return esw->mode;
}
+int mlx5_esw_lock(struct mlx5_eswitch *esw)
+{
+ down_write(&esw->mode_lock);
+
+ if (esw->eswitch_operation_in_progress) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
* mlx5_esw_unlock() - Release write lock on esw mode lock
* @esw: eswitch device.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 37ab66e7b403..b674b57d05aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -383,6 +383,7 @@ struct mlx5_eswitch {
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
+ bool eswitch_operation_in_progress;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -827,6 +828,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 88236e75fd90..bb8bcb448ae9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3653,14 +3653,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
struct net *devl_net, *netdev_net;
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_nocheck_get(devlink);
- netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
+ bool ret = false;
- return net_eq(devl_net, netdev_net);
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ if (dev->mlx5e_res.uplink_netdev) {
+ netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
+ devl_net = devlink_net(devlink);
+ ret = net_eq(devl_net, netdev_net);
+ }
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+ return ret;
}
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
@@ -3733,13 +3737,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
- goto unlock;
+ goto skip;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
@@ -3749,6 +3756,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
}
+skip:
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
unlock:
mlx5_esw_unlock(esw);
enable_lag:
@@ -3759,16 +3769,12 @@ enable_lag:
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_mode_to_devlink(esw->mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_mode_to_devlink(esw->mode, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3862,11 +3868,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
- if (err)
- goto out;
+ if (!err)
+ esw->offloads.inline_mode = mlx5_mode;
- esw->offloads.inline_mode = mlx5_mode;
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
up_write(&esw->mode_lock);
return 0;
@@ -3878,16 +3888,12 @@ out:
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
@@ -3969,6 +3975,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
@@ -3982,6 +3991,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw);
}
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
+
unlock:
up_write(&esw->mode_lock);
return err;
@@ -3996,9 +4008,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_read(&esw->mode_lock);
return 0;
}
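The recurring shape in these eswitch hunks: take mode_lock, refuse with -EBUSY if another operation is in flight, mark eswitch_operation_in_progress, drop the lock for the slow work, then retake it only to clear the flag — which is also what lets the devlink getters run lock-free. A compilable user-space sketch of that shape, using a pthread rwlock as a stand-in:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t mode_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool operation_in_progress;

    static int eswitch_op(void (*slow_op)(void))
    {
            pthread_rwlock_wrlock(&mode_lock);
            if (operation_in_progress) {
                    pthread_rwlock_unlock(&mode_lock);
                    return -EBUSY;          /* another flow owns the mode */
            }
            operation_in_progress = true;
            pthread_rwlock_unlock(&mode_lock);

            slow_op();      /* may sleep or take other locks safely now */

            pthread_rwlock_wrlock(&mode_lock);
            operation_in_progress = false;
            pthread_rwlock_unlock(&mode_lock);
            return 0;
    }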
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b568988e92e3..c4e19d627da2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *bridge = dev->pdev->bus->self;
+ u16 reg16;
+ int err;
+
+ if (!bridge)
+ return -EOPNOTSUPP;
+
+ err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
+ if (err)
+ return err;
+
+ if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
+ mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
return false;
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+#endif
+
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 65e20693c549..33f4f58ee51c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = NULL;
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 6f2fa2a42770..1822f2ad8f0d 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -30,6 +30,8 @@
#define QCASPI_MAX_REGS 0x20
+#define QCASPI_RX_MAX_FRAMES 4
+
static const u16 qcaspi_spi_regs[] = {
SPI_REG_BFR_SIZE,
SPI_REG_WRBUF_SPC_AVA,
@@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
{
struct qcaspi *qca = netdev_priv(dev);
- ring->rx_max_pending = 4;
+ ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_max_pending = TX_RING_MAX_LEN;
- ring->rx_pending = 4;
+ ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
- if ((ring->rx_pending) ||
+ if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
(ring->rx_mini_pending) ||
(ring->rx_jumbo_pending))
return -EINVAL;
- if (netif_running(dev))
- ops->ndo_stop(dev);
+ if (qca->spi_thread)
+ kthread_park(qca->spi_thread);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
- if (netif_running(dev))
- ops->ndo_open(dev);
+ if (qca->spi_thread)
+ kthread_unpark(qca->spi_thread);
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index bec723028e96..5f3c11fb3fa2 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -580,6 +580,18 @@ qcaspi_spi_thread(void *data)
netdev_info(qca->net_dev, "SPI thread created\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_park()) {
+ netif_tx_disable(qca->net_dev);
+ netif_carrier_off(qca->net_dev);
+ qcaspi_flush_tx_ring(qca);
+ kthread_parkme();
+ if (qca->sync == QCASPI_SYNC_READY) {
+ netif_carrier_on(qca->net_dev);
+ netif_wake_queue(qca->net_dev);
+ }
+ continue;
+ }
+
if ((qca->intr_req == qca->intr_svc) &&
!qca->txr.skb[qca->txr.head])
schedule();
@@ -608,11 +620,17 @@ qcaspi_spi_thread(void *data)
if (intr_cause & SPI_INT_CPU_ON) {
qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+ /* Frame decoding in progress */
+ if (qca->frm_handle.state != qca->frm_handle.init)
+ qca->net_dev->stats.rx_dropped++;
+
+ qcafrm_fsm_init_spi(&qca->frm_handle);
+ qca->stats.device_reset++;
+
/* not synced. */
if (qca->sync != QCASPI_SYNC_READY)
continue;
- qca->stats.device_reset++;
netif_wake_queue(qca->net_dev);
netif_carrier_on(qca->net_dev);
}
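kthread_park() is what lets qcaspi_set_ringparam() quiesce the SPI worker at a known-safe point instead of bouncing the whole netdev. A user-space analogue of the park/unpark handshake, built from a mutex and condition variable (simplified: the kernel primitive also waits until the thread has actually parked):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool should_park;

    /* Called by the worker at its safe point, like kthread_parkme():
     * sleep until the resizer unparks us, then resume the main loop. */
    static void parkme(void)
    {
            pthread_mutex_lock(&m);
            while (should_park)
                    pthread_cond_wait(&cv, &m);
            pthread_mutex_unlock(&m);
    }

    static void unpark(void)
    {
            pthread_mutex_lock(&m);
            should_park = false;
            pthread_cond_broadcast(&cv);
            pthread_mutex_unlock(&m);
    }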
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 2cd6fce5c993..9e40c28d453a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
return -ENODEV;
}
- if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
- pr_info("dwmac_loongson_pci: Incompatible OF node\n");
- return -ENODEV;
- }
-
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
plat->mdio_node = of_get_child_by_name(np, "mdio");
if (plat->mdio_node) {
dev_info(&pdev->dev, "Found MDIO subnode\n");
-
- plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
- sizeof(*plat->mdio_bus_data),
- GFP_KERNEL);
- if (!plat->mdio_bus_data) {
- ret = -ENOMEM;
- goto err_put_node;
- }
plat->mdio_bus_data->needs_reset = true;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index d3bf42d0fceb..31631e3f89d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -34,6 +34,7 @@
#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
#define RGMII_CONFIG_PROG_SWAP BIT(1)
#define RGMII_CONFIG_DDR_MODE BIT(0)
+#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10)
/* SDCC_HC_REG_DLL_CONFIG fields */
#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
@@ -78,6 +79,8 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+#define SGMII_10M_RX_CLK_DVDR 0x31
+
struct ethqos_emac_por {
unsigned int offset;
unsigned int value;
@@ -598,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
return 0;
}
+/* On an interface toggle, the MAC registers get reset.
+ * Configure the MAC block for SGMII on Ethernet PHY link up.
+ */
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
int val;
@@ -617,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
+ rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
+ FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
+ SGMII_10M_RX_CLK_DVDR),
+ RGMII_IO_MACRO_CONFIG);
break;
}
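The 10M case programs a 9-bit clock-divider field, bits 18:10 of RGMII_IO_MACRO_CONFIG, via FIELD_PREP. A standalone check of the field packing, with GENMASK/FIELD_PREP re-derived for user space (simplified; the kernel macros are more general):

    #include <stdio.h>

    #define GENMASK32(h, l)         ((~0u << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP32(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
            unsigned int mask = GENMASK32(18, 10);  /* RGMII_CONFIG_SGMII_CLK_DVDR */
            unsigned int field = FIELD_PREP32(mask, 0x31u); /* SGMII_10M_RX_CLK_DVDR */

            printf("mask=0x%x field=0x%x\n", mask, field);  /* 0x7fc00 0xc400 */
            return 0;
    }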
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fa9e7e7040b9..0542cfd1817e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
- if (err != 0) {
+ if (err == -ENODEV) {
+ err = 0;
+ dev_info(dev, "MDIO bus is disabled\n");
+ goto bus_register_fail;
+ } else if (err) {
dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 508d9a392ab1..f575f225d417 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -281,8 +281,10 @@ static int __team_options_register(struct team *team,
return 0;
inst_rollback:
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
__team_option_inst_del_option(team, dst_opts[i]);
+ list_del(&dst_opts[i]->list);
+ }
i = option_count;
alloc_rollback:
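
The two added lines close a hole on the rollback path: each option instance that is deleted also has to be unlinked from the team's option list, otherwise a stale list entry survives the failed registration. A hedged sketch of the register/rollback idiom, with hypothetical register_one()/unregister_one() helpers:

#include <linux/list.h>

struct item {
	struct list_head list;
	/* ... payload ... */
};

static int register_all(struct list_head *head, struct item *items, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = register_one(&items[i]);	/* hypothetical */
		if (err)
			goto rollback;
		list_add_tail(&items[i].list, head);
	}
	return 0;

rollback:
	/* undo in reverse order, unlinking what was linked */
	for (i--; i >= 0; i--) {
		unregister_one(&items[i]);	/* hypothetical */
		list_del(&items[i].list);
	}
	return err;
}
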
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6902e97719d1..11c80555d975 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -968,9 +968,12 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
- /* Downstream devices need to be in D0 state before enabling PCI PM substates */
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
pci_set_power_state(pdev, PCI_D0);
- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
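
The switch to pci_enable_link_state_locked() matters because, judging by its (struct pci_dev *, void *) signature, this function is shaped as a pci_walk_bus() callback, and pci_walk_bus() holds the pci_bus_sem read lock while invoking callbacks; taking the lock again inside one would be a recursive read lock. A sketch of the calling shape, assuming the quirk is applied via pci_walk_bus():

#include <linux/pci.h>

static int enable_aspm_cb(struct pci_dev *pdev, void *userdata)
{
	/* pci_bus_sem is already held by pci_walk_bus() */
	pci_set_power_state(pdev, PCI_D0);
	return pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
}

static void host_post_init(struct pci_bus *root_bus)
{
	/* iterates every device below root_bus under pci_bus_sem */
	pci_walk_bus(root_bus, enable_aspm_cb, NULL);
}
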
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index d45e7b8dc530..8b34ccff073a 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
+/*
+ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
+ * Request Size. They can't handle anything larger than this. Sane
+ * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for
+ * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly,
+ * so we have to enforce maximum safe MRRS, which is 256 bytes.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_bus *bus = pdev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id bridge_devids[] = {
+ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
+ { 0, },
+ };
+
+ /* look for the matching bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+
+ if (pci_match_id(bridge_devids, bridge)) {
+ if (pcie_get_readrq(pdev) > 256) {
+ pci_info(pdev, "limiting MRRS to 256\n");
+ pcie_set_readrq(pdev, 256);
+ }
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
static void loongson_mrrs_quirk(struct pci_dev *pdev)
{
- /*
- * Some Loongson PCIe ports have h/w limitations of maximum read
- * request size. They can't handle anything larger than this. So
- * force this limit on any devices attached under these ports.
- */
struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
bridge->no_inc_mrrs = 1;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94ba61fe1c44..0452cbc362ee 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -751,7 +751,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 601129772b2d..5b1f271c6034 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -512,15 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
- if (pci_is_root_bus(bus))
- __pci_bus_size_bridges(dev->subordinate, &add_list);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
}
}
}
- if (pci_is_root_bus(bus))
- __pci_bus_assign_resources(bus, &add_list, NULL);
- else
- pci_assign_unassigned_bridge_resources(bus->self);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
acpiphp_sanitize_bus(bus);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 50b04ae5c394..5dab531c8654 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1041,7 +1041,7 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1060,7 +1060,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
return -EPERM;
}
- if (sem)
+ if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
if (state & PCIE_LINK_STATE_L0S)
@@ -1082,7 +1082,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
link->clkpm_disable = 1;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- if (sem)
+ if (!locked)
up_read(&pci_bus_sem);
return 0;
@@ -1090,7 +1090,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, false);
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -1105,21 +1107,11 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
*/
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, true);
+ return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
-/**
- * pci_enable_link_state - Clear and set the default device link state so that
- * the link may be allowed to enter the specified states. Note that if the
- * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
- * touch the LNKCTL register. Also note that this does not enable states
- * disabled by pci_disable_link_state(). Return 0 or a negative errno.
- *
- * @pdev: PCI device
- * @state: Mask of ASPM link states to enable
- */
-int pci_enable_link_state(struct pci_dev *pdev, int state)
+static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1136,7 +1128,8 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
return -EPERM;
}
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link->aspm_default = 0;
if (state & PCIE_LINK_STATE_L0S)
@@ -1157,12 +1150,48 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
return 0;
}
+
+/**
+ * pci_enable_link_state - Clear and set the default device link state so that
+ * the link may be allowed to enter the specified states. Note that if the
+ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+ * touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ */
+int pci_enable_link_state(struct pci_dev *pdev, int state)
+{
+ return __pci_enable_link_state(pdev, state, false);
+}
EXPORT_SYMBOL(pci_enable_link_state);
+/**
+ * pci_enable_link_state_locked - Clear and set the default device link state
+ * so that the link may be allowed to enter the specified states. Note that if
+ * the BIOS didn't grant ASPM control to the OS, this does nothing because we
+ * can't touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ *
+ * Context: Caller holds pci_bus_sem read lock.
+ */
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_enable_link_state(pdev, state, true);
+}
+EXPORT_SYMBOL(pci_enable_link_state_locked);
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
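
The refactor replaces the inverted `sem` flag (take the lock when true) with a `locked` flag (caller already holds it when true) and adds _locked entry points that merely assert the lock. Reduced to its essentials, the pattern looks like this; do_op()/do_op_locked() are illustrative stand-ins for the ASPM entry points:

#include <linux/pci.h>
#include <linux/lockdep.h>

static int __do_op(struct pci_dev *pdev, int state, bool locked)
{
	if (!locked)
		down_read(&pci_bus_sem);
	/* ... the real work always runs with pci_bus_sem held ... */
	if (!locked)
		up_read(&pci_bus_sem);
	return 0;
}

int do_op(struct pci_dev *pdev, int state)
{
	return __do_op(pdev, state, false);	/* takes the lock itself */
}

int do_op_locked(struct pci_dev *pdev, int state)
{
	lockdep_assert_held_read(&pci_bus_sem);
	return __do_op(pdev, state, true);	/* caller holds the lock */
}
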
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 014010d03588..847b0dc41293 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1816,7 +1816,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
idx = 0;
while (cmn->dtc[j].counters[idx])
if (++idx == CMN_DT_NUM_COUNTERS)
- goto free_dtms;
+ return -ENOSPC;
}
hw->dtc_idx[j] = idx;
}
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
index f021ec5a70e5..553725e1269c 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
@@ -100,7 +100,7 @@ static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- return clamp_val(rate, 50000000, 1600000000);
+ return clamp_val(rate, 125000000, 1600000000);
}
static const struct clk_ops mtk_mipi_tx_pll_ops = {
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
index 0efe74ac9c6a..637a5fbae6d9 100644
--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -275,7 +275,7 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
if (IS_ERR(phy)) {
- ret = -PTR_ERR(phy);
+ ret = PTR_ERR(phy);
return ret;
}
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 555b323f45da..bc847d3879f7 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -64,6 +64,7 @@ struct phy_gmii_sel_priv {
u32 num_ports;
u32 reg_offset;
u32 qsgmii_main_ports;
+ bool no_offset;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -402,7 +403,8 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
priv->num_ports = size / sizeof(u32);
if (!priv->num_ports)
return -EINVAL;
- priv->reg_offset = __be32_to_cpu(*offset);
+ if (!priv->no_offset)
+ priv->reg_offset = __be32_to_cpu(*offset);
}
if_phys = devm_kcalloc(dev, priv->num_ports,
@@ -471,6 +473,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get syscon %d\n", ret);
return ret;
}
+ priv->no_offset = true;
}
ret = phy_gmii_sel_init_ports(priv);
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 6fa1735ad7a4..210b0a81b7ec 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
bool wakeup_mode;
};
-static void detect_tablet_mode(struct platform_device *device)
+static void detect_tablet_mode(struct device *dev)
{
- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- acpi_handle handle = ACPI_HANDLE(&device->dev);
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
unsigned long long vgbs;
acpi_status status;
int m;
@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
input_report_switch(priv->switches_dev, SW_DOCK, m);
+
+ input_sync(priv->switches_dev);
}
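
The added input_sync() is what actually delivers the switch state: input_report_switch() only queues events, and evdev readers see nothing until a SYN_REPORT marks the packet complete. A minimal sketch of the reporting pattern, with device allocation and registration elided:

#include <linux/input.h>

static void report_tablet_state(struct input_dev *idev,
				bool tablet, bool docked)
{
	input_report_switch(idev, SW_TABLET_MODE, tablet);
	input_report_switch(idev, SW_DOCK, docked);
	input_sync(idev);	/* emits SYN_REPORT, flushing the packet */
}
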
/*
@@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(device);
+ detect_tablet_mode(&device->dev);
ret = input_register_device(priv->switches_dev);
if (ret)
@@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
+
+ /* Some devices need this to report further events */
+ acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
@@ -352,7 +357,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
static int intel_vbtn_pm_resume(struct device *dev)
{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
intel_vbtn_pm_complete(dev);
+
+ if (priv->has_switches)
+ detect_tablet_mode(dev);
+
return 0;
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 4dfdbfca6841..c66808601fdd 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -590,6 +590,8 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
* @ips: IPS driver struct
*
* Check whether the MCP is over its thermal or power budget.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool mcp_exceeded(struct ips_driver *ips)
{
@@ -619,6 +621,8 @@ static bool mcp_exceeded(struct ips_driver *ips)
* @cpu: CPU number to check
*
* Check a given CPU's average temp or power is over its limit.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
{
@@ -645,6 +649,8 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
* @ips: IPS driver struct
*
* Check the MCH temp & power against their maximums.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool mch_exceeded(struct ips_driver *ips)
{
@@ -742,12 +748,13 @@ static void update_turbo_limits(struct ips_driver *ips)
* - down (at TDP limit)
* - adjust both CPU and GPU down if possible
*
- cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu-
-cpu < gpu < cpu+gpu+ cpu+ gpu+ nothing
-cpu < gpu >= cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu-
-cpu >= gpu < cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu-
-cpu >= gpu >= cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu-
+ * |cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu-
+ * cpu < gpu < |cpu+gpu+ cpu+ gpu+ nothing
+ * cpu < gpu >= |cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu-
+ * cpu >= gpu < |cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu-
+ * cpu >= gpu >=|cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu-
*
+ * Returns: %0
*/
static int ips_adjust(void *data)
{
@@ -935,11 +942,13 @@ static void monitor_timeout(struct timer_list *t)
* @data: ips driver structure
*
* This is the main function for the IPS driver. It monitors power and
- * tempurature in the MCP and adjusts CPU and GPU power clams accordingly.
+ * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
*
- * We keep a 5s moving average of power consumption and tempurature. Using
+ * We keep a 5s moving average of power consumption and temperature. Using
* that data, along with CPU vs GPU preference, we adjust the power clamps
* up or down.
+ *
+ * Returns: %0 on success or -errno on error
*/
static int ips_monitor(void *data)
{
@@ -1146,6 +1155,8 @@ static void dump_thermal_info(struct ips_driver *ips)
* Handle temperature limit trigger events, generally by lowering the clamps.
* If we're at a critical limit, we clamp back to the lowest possible value
* to prevent emergency shutdown.
+ *
+ * Returns: IRQ_NONE or IRQ_HANDLED
*/
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
@@ -1293,9 +1304,12 @@ static void ips_debugfs_init(struct ips_driver *ips)
/**
* ips_detect_cpu - detect whether CPU supports IPS
+ * @ips: IPS driver struct
*
* Walk our list and see if we're on a supported CPU. If we find one,
* return the limits for it.
+ *
+ * Returns: the &ips_mcp_limits struct that matches the boot CPU or %NULL
*/
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
@@ -1352,6 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
* monitor and control graphics turbo mode. If we can find them, we can
* enable graphics turbo, otherwise we must disable it to avoid exceeding
* thermal and power limits in the MCP.
+ *
+ * Returns: %true if the required symbols are found, else %false
*/
static bool ips_get_i915_syms(struct ips_driver *ips)
{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d0b5fd4137bc..3392ae99ac3f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -512,10 +512,10 @@ struct tpacpi_quirk {
* Iterates over a quirks list until one is found that matches the
* ThinkPad's vendor, BIOS and EC model.
*
- * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * Returns: %0 if nothing matches, otherwise returns the quirks field of
* the matching &struct tpacpi_quirk entry.
*
- * The match criteria is: vendor, ec and bios much match.
+ * The match criteria are: vendor, EC and BIOS must match.
*/
static unsigned long __init tpacpi_check_quirks(
const struct tpacpi_quirk *qlist,
@@ -9303,7 +9303,7 @@ static struct tpacpi_battery_driver_data battery_info;
/* ACPI helpers/functions/probes */
-/**
+/*
* This evaluates an ACPI method call specific to the battery
* ACPI extension. The specifics are that an error is marked
* in the 32nd bit of the response, so we just check that here.
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 82672fcbc2aa..8280baa3254b 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -23,8 +23,9 @@
static void intel_shim_vs_init(struct sdw_intel *sdw)
{
void __iomem *shim_vs = sdw->link_res->shim_vs;
- u16 act = 0;
+ u16 act;
+ act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
u16p_replace_bits(&act, 0x1, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DODS;
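
Before this fix `act` started at zero, so writing it back clobbered every ACTMCTL bit the function didn't explicitly set; reading the register first makes the sequence a proper read-modify-write. A sketch of that idiom with u16p_replace_bits(); the register layout here is illustrative:

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/io.h>

#define FIELD_A	GENMASK(3, 2)
#define FLAG_B	BIT(5)

static void rmw_update(void __iomem *base, unsigned int offset)
{
	u16 val = readw(base + offset);		/* keep current contents */

	u16p_replace_bits(&val, 0x1, FIELD_A);	/* rewrite one field */
	val |= FLAG_B;				/* set a flag */

	writew(val, base + offset);		/* other bits preserved */
}
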
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 69719b335bcb..f048b3d55b2e 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -742,14 +742,15 @@ error_1:
* sdw_ml_sync_bank_switch: Multilink register bank switch
*
* @bus: SDW bus instance
+ * @multi_link: whether this is a multi-link stream with hardware-based sync
*
* Caller function should free the buffers on error
*/
-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
{
unsigned long time_left;
- if (!bus->multi_link)
+ if (!multi_link)
return 0;
/* Wait for completion of transfer */
@@ -847,7 +848,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
/* Check if bank switch was successful */
- ret = sdw_ml_sync_bank_switch(bus);
+ ret = sdw_ml_sync_bank_switch(bus, multi_link);
if (ret < 0) {
dev_err(bus->dev,
"multi link bank switch failed: %d\n", ret);