Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_lpss.c | 10
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 46
-rw-r--r--  drivers/acpi/cppc_acpi.c | 47
-rw-r--r--  drivers/acpi/prmt.c | 10
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/acpi/tables.c | 5
-rw-r--r--  drivers/acpi/x86/s2idle.c | 67
-rw-r--r--  drivers/ata/libata-core.c | 34
-rw-r--r--  drivers/ata/libata-eh.c | 5
-rw-r--r--  drivers/ata/libata-scsi.c | 10
-rw-r--r--  drivers/ata/pata_falcon.c | 4
-rw-r--r--  drivers/auxdisplay/cfag12864b.c | 2
-rw-r--r--  drivers/auxdisplay/charlcd.c | 4
-rw-r--r--  drivers/auxdisplay/hd44780.c | 2
-rw-r--r--  drivers/auxdisplay/ks0108.c | 18
-rw-r--r--  drivers/base/arch_numa.c | 5
-rw-r--r--  drivers/base/arch_topology.c | 2
-rw-r--r--  drivers/base/core.c | 6
-rw-r--r--  drivers/base/memory.c | 229
-rw-r--r--  drivers/base/node.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 17
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/runtime.c | 17
-rw-r--r--  drivers/base/power/wakeirq.c | 11
-rw-r--r--  drivers/block/Kconfig | 3
-rw-r--r--  drivers/block/loop.c | 75
-rw-r--r--  drivers/block/loop.h | 1
-rw-r--r--  drivers/block/n64cart.c | 4
-rw-r--r--  drivers/block/paride/Kconfig | 1
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 1
-rw-r--r--  drivers/block/xen-blkfront.c | 126
-rw-r--r--  drivers/cdrom/cdrom.c | 78
-rw-r--r--  drivers/char/ipmi/ipmi_si.h | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 29
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 4
-rw-r--r--  drivers/clk/Kconfig | 1
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/clk/at91/clk-generated.c | 6
-rw-r--r--  drivers/clk/at91/sama7g5.c | 14
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 9
-rw-r--r--  drivers/clk/clk-divider.c | 23
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 56
-rw-r--r--  drivers/clk/clk-fractional-divider.h | 15
-rw-r--r--  drivers/clk/clk-lmk04832.c | 18
-rw-r--r--  drivers/clk/clk-palmas.c | 10
-rw-r--r--  drivers/clk/clk-stm32f4.c | 8
-rw-r--r--  drivers/clk/clk-stm32h7.c | 8
-rw-r--r--  drivers/clk/clk-stm32mp1.c | 10
-rw-r--r--  drivers/clk/clk-versaclock5.c | 42
-rw-r--r--  drivers/clk/imx/clk-composite-7ulp.c | 1
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 3
-rw-r--r--  drivers/clk/imx/clk-divider-gate.c | 10
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 11
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 16
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 7
-rw-r--r--  drivers/clk/imx/clk.h | 16
-rw-r--r--  drivers/clk/mediatek/Kconfig | 105
-rw-r--r--  drivers/clk/mediatek/Makefile | 13
-rw-r--r--  drivers/clk/mediatek/clk-cpumux.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-aud.c | 118
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-cam.c | 107
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-img.c | 70
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c | 119
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-ipe.c | 57
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mdp.c | 82
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mfg.c | 50
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mm.c | 108
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-msdc.c | 85
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-scp_adsp.c | 50
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-vdec.c | 94
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-venc.c | 53
-rw-r--r--  drivers/clk/mediatek/clk-mt8192.c | 1326
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 25
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 28
-rw-r--r--  drivers/clk/mediatek/clk-mux.c | 11
-rw-r--r--  drivers/clk/mediatek/clk-mux.h | 18
-rw-r--r--  drivers/clk/mediatek/clk-pll.c | 31
-rw-r--r--  drivers/clk/mediatek/reset.c | 2
-rw-r--r--  drivers/clk/mvebu/kirkwood.c | 1
-rw-r--r--  drivers/clk/pistachio/Kconfig | 8
-rw-r--r--  drivers/clk/qcom/Kconfig | 60
-rw-r--r--  drivers/clk/qcom/Makefile | 7
-rw-r--r--  drivers/clk/qcom/a53-pll.c | 68
-rw-r--r--  drivers/clk/qcom/apcs-msm8916.c | 10
-rw-r--r--  drivers/clk/qcom/camcc-sc7180.c | 25
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 21
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 158
-rw-r--r--  drivers/clk/qcom/dispcc-sc7280.c | 908
-rw-r--r--  drivers/clk/qcom/dispcc-sm8250.c | 22
-rw-r--r--  drivers/clk/qcom/gcc-msm8953.c | 4250
-rw-r--r--  drivers/clk/qcom/gcc-sdm660.c | 504
-rw-r--r--  drivers/clk/qcom/gcc-sm6115.c | 3544
-rw-r--r--  drivers/clk/qcom/gcc-sm6350.c | 2584
-rw-r--r--  drivers/clk/qcom/gpucc-sc7280.c | 491
-rw-r--r--  drivers/clk/qcom/gpucc-sm8150.c | 12
-rw-r--r--  drivers/clk/qcom/lpass-gfm-sm8250.c | 21
-rw-r--r--  drivers/clk/qcom/lpasscorecc-sc7180.c | 18
-rw-r--r--  drivers/clk/qcom/mmcc-msm8994.c | 2620
-rw-r--r--  drivers/clk/qcom/mss-sc7180.c | 30
-rw-r--r--  drivers/clk/qcom/q6sstop-qcs404.c | 32
-rw-r--r--  drivers/clk/qcom/turingcc-qcs404.c | 30
-rw-r--r--  drivers/clk/qcom/videocc-sc7280.c | 325
-rw-r--r--  drivers/clk/ralink/clk-mt7621.c | 9
-rw-r--r--  drivers/clk/renesas/Kconfig | 4
-rw-r--r--  drivers/clk/renesas/Makefile | 2
-rw-r--r--  drivers/clk/renesas/r8a774a1-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a774b1-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a774c0-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a774e1-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a779a0-cpg-mssr.c | 5
-rw-r--r--  drivers/clk/renesas/r9a07g044-cpg.c | 72
-rw-r--r--  drivers/clk/renesas/rzg2l-cpg.c (renamed from drivers/clk/renesas/renesas-rzg2l-cpg.c) | 22
-rw-r--r--  drivers/clk/renesas/rzg2l-cpg.h (renamed from drivers/clk/renesas/renesas-rzg2l-cpg.h) | 0
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 2
-rw-r--r--  drivers/clk/rockchip/clk-rk3036.c | 5
-rw-r--r--  drivers/clk/rockchip/clk-rk3308.c | 1
-rw-r--r--  drivers/clk/rockchip/clk.c | 17
-rw-r--r--  drivers/clk/socfpga/clk-agilex.c | 19
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 6
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 3
-rw-r--r--  drivers/clk/x86/Makefile | 2
-rw-r--r--  drivers/clk/x86/clk-lpss-atom.c (renamed from drivers/clk/x86/clk-lpt.c) | 12
-rw-r--r--  drivers/clk/zynqmp/clk-gate-zynqmp.c | 4
-rw-r--r--  drivers/clk/zynqmp/clk-mux-zynqmp.c | 2
-rw-r--r--  drivers/clk/zynqmp/clk-zynqmp.h | 1
-rw-r--r--  drivers/clk/zynqmp/clkc.c | 4
-rw-r--r--  drivers/clocksource/Kconfig | 3
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 12
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 224
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c | 308
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 151
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 65
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 25
-rw-r--r--  drivers/cpuidle/cpuidle-pseries.c | 77
-rw-r--r--  drivers/cpuidle/cpuidle-ux500.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r--  drivers/cxl/Makefile | 4
-rw-r--r--  drivers/cxl/acpi.c | 12
-rw-r--r--  drivers/cxl/core/Makefile | 8
-rw-r--r--  drivers/cxl/core/bus.c (renamed from drivers/cxl/core.c) | 464
-rw-r--r--  drivers/cxl/core/core.h | 20
-rw-r--r--  drivers/cxl/core/memdev.c | 246
-rw-r--r--  drivers/cxl/core/pmem.c | 230
-rw-r--r--  drivers/cxl/core/regs.c | 249
-rw-r--r--  drivers/cxl/cxl.h | 1
-rw-r--r--  drivers/cxl/cxlmem.h (renamed from drivers/cxl/mem.h) | 35
-rw-r--r--  drivers/cxl/pci.c | 439
-rw-r--r--  drivers/cxl/pci.h | 1
-rw-r--r--  drivers/cxl/pmem.c | 2
-rw-r--r--  drivers/dax/kmem.c | 43
-rw-r--r--  drivers/dax/super.c | 191
-rw-r--r--  drivers/devfreq/devfreq.c | 2
-rw-r--r--  drivers/dma-buf/Kconfig | 4
-rw-r--r--  drivers/dma/Kconfig | 28
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/acpi-dma.c | 18
-rw-r--r--  drivers/dma/altera-msgdma.c | 37
-rw-r--r--  drivers/dma/at_xdmac.c | 8
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 56
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 4
-rw-r--r--  drivers/dma/dw/idma32.c | 138
-rw-r--r--  drivers/dma/dw/internal.h | 16
-rw-r--r--  drivers/dma/dw/of.c | 49
-rw-r--r--  drivers/dma/dw/pci.c | 6
-rw-r--r--  drivers/dma/dw/platform.c | 6
-rw-r--r--  drivers/dma/ep93xx_dma.c | 6
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 10
-rw-r--r--  drivers/dma/hisi_dma.c | 10
-rw-r--r--  drivers/dma/idxd/Makefile | 8
-rw-r--r--  drivers/dma/idxd/bus.c | 91
-rw-r--r--  drivers/dma/idxd/cdev.c | 73
-rw-r--r--  drivers/dma/idxd/compat.c | 107
-rw-r--r--  drivers/dma/idxd/device.c | 382
-rw-r--r--  drivers/dma/idxd/dma.c | 96
-rw-r--r--  drivers/dma/idxd/idxd.h | 167
-rw-r--r--  drivers/dma/idxd/init.c | 148
-rw-r--r--  drivers/dma/idxd/irq.c | 190
-rw-r--r--  drivers/dma/idxd/registers.h | 6
-rw-r--r--  drivers/dma/idxd/submit.c | 43
-rw-r--r--  drivers/dma/idxd/sysfs.c | 601
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 12
-rw-r--r--  drivers/dma/ptdma/Kconfig | 13
-rw-r--r--  drivers/dma/ptdma/Makefile | 10
-rw-r--r--  drivers/dma/ptdma/ptdma-debugfs.c | 106
-rw-r--r--  drivers/dma/ptdma/ptdma-dev.c | 305
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c | 389
-rw-r--r--  drivers/dma/ptdma/ptdma-pci.c | 243
-rw-r--r--  drivers/dma/ptdma/ptdma.h | 324
-rw-r--r--  drivers/dma/sh/Kconfig | 9
-rw-r--r--  drivers/dma/sh/Makefile | 1
-rw-r--r--  drivers/dma/sh/rz-dmac.c | 969
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 2
-rw-r--r--  drivers/dma/sprd-dma.c | 1
-rw-r--r--  drivers/dma/stm32-dma.c | 8
-rw-r--r--  drivers/dma/tegra210-adma.c | 7
-rw-r--r--  drivers/dma/ti/k3-psil-j721e.c | 73
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 17
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 3
-rw-r--r--  drivers/firewire/net.c | 4
-rw-r--r--  drivers/firmware/dmi-id.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/vsprintf.c | 2
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 24
-rw-r--r--  drivers/firmware/qcom_scm.c | 58
-rw-r--r--  drivers/firmware/qcom_scm.h | 4
-rw-r--r--  drivers/gpio/Kconfig | 15
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c | 178
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 45
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 56
-rw-r--r--  drivers/gpio/gpio-mc9s08dz60.c | 112
-rw-r--r--  drivers/gpio/gpio-ml-ioh.c | 49
-rw-r--r--  drivers/gpio/gpio-mlxbf2.c | 43
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 13
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 1
-rw-r--r--  drivers/gpio/gpio-rcar.c | 4
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 30
-rw-r--r--  drivers/gpio/gpio-viperboard.c | 14
-rw-r--r--  drivers/gpio/gpio-virtio.c | 374
-rw-r--r--  drivers/gpio/gpiolib-devres.c | 2
-rw-r--r--  drivers/gpio/gpiolib-of.c | 16
-rw-r--r--  drivers/gpio/gpiolib.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 24
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 13
-rw-r--r--  drivers/gpu/drm/drm_print.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 8
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_pll.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 31
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 14
-rw-r--r--  drivers/hid/Kconfig | 7
-rw-r--r--  drivers/hid/Makefile | 1
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 62
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 69
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 23
-rw-r--r--  drivers/hid/hid-apple.c | 32
-rw-r--r--  drivers/hid/hid-asus.c | 15
-rw-r--r--  drivers/hid/hid-cmedia.c | 90
-rw-r--r--  drivers/hid/hid-elo.c | 6
-rw-r--r--  drivers/hid/hid-ids.h | 9
-rw-r--r--  drivers/hid/hid-input.c | 2
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 47
-rw-r--r--  drivers/hid/hid-magicmouse.c | 52
-rw-r--r--  drivers/hid/hid-quirks.c | 2
-rw-r--r--  drivers/hid/hid-sony.c | 49
-rw-r--r--  drivers/hid/hid-thrustmaster.c | 7
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 5
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-of-goodix.c | 92
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 29
-rw-r--r--  drivers/hid/wacom_sys.c | 9
-rw-r--r--  drivers/hid/wacom_wac.c | 50
-rw-r--r--  drivers/hid/wacom_wac.h | 2
-rw-r--r--  drivers/hwmon/mr75203.c | 2
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 32
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 3
-rw-r--r--  drivers/iio/light/as73211.c | 3
-rw-r--r--  drivers/infiniband/core/cache.c | 10
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/core/core_priv.h | 46
-rw-r--r--  drivers/infiniband/core/device.c | 12
-rw-r--r--  drivers/infiniband/core/iwcm.c | 19
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 34
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 78
-rw-r--r--  drivers/infiniband/core/iwpm_util.h | 18
-rw-r--r--  drivers/infiniband/core/restrack.c | 2
-rw-r--r--  drivers/infiniband/core/sa_query.c | 186
-rw-r--r--  drivers/infiniband/core/umem.c | 56
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 5
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 31
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_qp.c | 29
-rw-r--r--  drivers/infiniband/core/verbs.c | 234
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 30
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 37
-rw-r--r--  drivers/infiniband/hw/efa/efa.h | 10
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 20
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 147
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/aspm.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 50
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/chip_registers.h | 50
-rw-r--r--  drivers/infiniband/hw/hfi1/common.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c | 58
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/device.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/device.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/efivar.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/efivar.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/eprom.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/eprom.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/exp_rcv.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/exp_rcv.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.h | 50
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 51
-rw-r--r--  drivers/infiniband/hw/hfi1/firmware.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 54
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 53
-rw-r--r--  drivers/infiniband/hw/hfi1/intr.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/iowait.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/msix.c | 43
-rw-r--r--  drivers/infiniband/hw/hfi1/msix.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/opa_compat.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 55
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/pio_copy.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/platform.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/platform.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/qsfp.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/qsfp.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c | 46
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 46
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma_txreq.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/trace.c | 46
-rw-r--r--  drivers/infiniband/hw/hfi1/trace.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_ctxts.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_dbg.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_ibhdrs.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_misc.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_mmu.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_rc.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_rx.h | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_tx.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 57
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/user_pages.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 45
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.h | 49
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/vnic.h | 48
-rw-r--r--  drivers/infiniband/hw/hfi1/vnic_main.c | 44
-rw-r--r--  drivers/infiniband/hw/hfi1/vnic_sdma.c | 44
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c | 74
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 30
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 81
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 23
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 40
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c | 31
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 196
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 28
-rw-r--r--  drivers/infiniband/hw/irdma/protos.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 33
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c | 51
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 145
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 232
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 21
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 25
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 77
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 25
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 5
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 1
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.c | 13
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.h | 5
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 101
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 12
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 12
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | 34
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h | 10
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 69
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 15
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 53
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 5
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/mad.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/mad.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 46
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 146
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.h | 55
-rw-r--r--  drivers/infiniband/sw/rdmavt/rc.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.h | 50
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.c | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_cq.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_mr.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_qp.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_rc.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_rvt.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_tx.h | 44
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 53
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.h | 50
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 22
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_icrc.c | 124
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 61
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 25
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 59
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 23
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 13
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 33
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 59
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c | 44
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 54
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.h | 5
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c | 4
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 157
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h | 7
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-pri.h | 6
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c | 95
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.h | 6
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c | 23
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 9
-rw-r--r--  drivers/input/joystick/analog.c | 107
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/adc-keys.c | 2
-rw-r--r--  drivers/input/keyboard/adp5588-keys.c | 2
-rw-r--r--  drivers/input/keyboard/adp5589-keys.c | 2
-rw-r--r--  drivers/input/keyboard/ep93xx_keypad.c | 4
-rw-r--r--  drivers/input/keyboard/hilkbd.c | 4
-rw-r--r--  drivers/input/misc/Kconfig | 22
-rw-r--r--  drivers/input/misc/Makefile | 2
-rw-r--r--  drivers/input/misc/ixp4xx-beeper.c | 183
-rw-r--r--  drivers/input/misc/pm8941-pwrkey.c | 2
-rw-r--r--  drivers/input/misc/sirfsoc-onkey.c | 207
-rw-r--r--  drivers/input/mouse/elan_i2c.h | 3
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/serio/gscps2.c | 3
-rw-r--r--  drivers/input/serio/parkbd.c | 14
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 1
-rw-r--r--  drivers/input/touchscreen/mms114.c | 15
-rw-r--r--  drivers/iommu/Kconfig | 69
-rw-r--r--  drivers/iommu/Makefile | 1
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 6
-rw-r--r--  drivers/iommu/amd/init.c | 60
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 3
-rw-r--r--  drivers/iommu/amd/iommu.c | 151
-rw-r--r--  drivers/iommu/amd/iommu_v2.c | 13
-rw-r--r--  drivers/iommu/apple-dart.c | 923
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 121
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 11
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 89
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h | 1
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c | 9
-rw-r--r--  drivers/iommu/dma-iommu.c | 85
-rw-r--r--  drivers/iommu/exynos-iommu.c | 19
-rw-r--r--  drivers/iommu/intel/Kconfig | 19
-rw-r--r--  drivers/iommu/intel/dmar.c | 2
-rw-r--r--  drivers/iommu/intel/iommu.c | 197
-rw-r--r--  drivers/iommu/intel/pasid.c | 18
-rw-r--r--  drivers/iommu/intel/pasid.h | 10
-rw-r--r--  drivers/iommu/intel/perf.c | 2
-rw-r--r--  drivers/iommu/intel/svm.c | 19
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 62
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 282
-rw-r--r--  drivers/iommu/io-pgtable.c | 1
-rw-r--r--  drivers/iommu/iommu.c | 210
-rw-r--r--  drivers/iommu/iova.c | 16
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 28
-rw-r--r--  drivers/iommu/mtk_iommu.c | 13
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c | 1
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 12
-rw-r--r--  drivers/iommu/sprd-iommu.c | 7
-rw-r--r--  drivers/iommu/sun50i-iommu.c | 13
-rw-r--r--  drivers/iommu/virtio-iommu.c | 8
-rw-r--r--  drivers/isdn/capi/capiutil.c | 2
-rw-r--r--  drivers/macintosh/macio-adb.c | 1
-rw-r--r--  drivers/macintosh/via-cuda.c | 2
-rw-r--r--  drivers/macintosh/via-macii.c | 2
-rw-r--r--  drivers/macintosh/via-pmu.c | 2
-rw-r--r--  drivers/mailbox/Kconfig | 2
-rw-r--r--  drivers/mailbox/mailbox-sti.c | 16
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c | 109
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c | 2
-rw-r--r--  drivers/mailbox/qcom-ipcc.c | 1
-rw-r--r--  drivers/md/dm-ima.c | 3
-rw-r--r--  drivers/md/dm-table.c | 9
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/media/i2c/ov02a10.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 28
-rw-r--r--  drivers/mfd/Makefile | 5
-rw-r--r--  drivers/mfd/ab8500-core.c | 2
-rw-r--r--  drivers/mfd/axp20x.c | 16
-rw-r--r--  drivers/mfd/db8500-prcmu-regs.h (renamed from drivers/mfd/dbx500-prcmu-regs.h) | 0
-rw-r--r--  drivers/mfd/db8500-prcmu.c | 25
-rw-r--r--  drivers/mfd/intel-lpss-acpi.c | 18
-rw-r--r--  drivers/mfd/intel-lpss.c | 3
-rw-r--r--  drivers/mfd/intel-m10-bmc.c | 12
-rw-r--r--  drivers/mfd/intel_quark_i2c_gpio.c | 71
-rw-r--r--  drivers/mfd/lpc_ich.c | 1
-rw-r--r--  drivers/mfd/lpc_sch.c | 4
-rw-r--r--  drivers/mfd/mt6360-core.c | 8
-rw-r--r--  drivers/mfd/rsmu.h | 16
-rw-r--r--  drivers/mfd/rsmu_core.c | 88
-rw-r--r--  drivers/mfd/rsmu_i2c.c | 203
-rw-r--r--  drivers/mfd/rsmu_spi.c | 273
-rw-r--r--  drivers/mfd/simple-mfd-i2c.c | 41
-rw-r--r--  drivers/mfd/simple-mfd-i2c.h | 32
-rw-r--r--  drivers/mfd/stmpe.c | 4
-rw-r--r--  drivers/mfd/syscon.c | 2
-rw-r--r--  drivers/mfd/tc3589x.c | 2
-rw-r--r--  drivers/mfd/ti_am335x_tscadc.c | 5
-rw-r--r--  drivers/mfd/tps65086.c | 22
-rw-r--r--  drivers/mfd/tqmx86.c | 48
-rw-r--r--  drivers/mfd/wm8994-irq.c | 2
-rw-r--r--  drivers/misc/habanalabs/common/Makefile | 3
-rw-r--r--  drivers/misc/habanalabs/common/command_buffer.c | 4
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 1299
-rw-r--r--  drivers/misc/habanalabs/common/context.c | 146
-rw-r--r--  drivers/misc/habanalabs/common/debugfs.c | 184
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 163
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 56
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 421
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c | 13
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c | 2
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 198
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 169
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu_v1.c | 12
-rw-r--r--  drivers/misc/habanalabs/common/pci/pci.c | 2
-rw-r--r--  drivers/misc/habanalabs/common/state_dump.c | 718
-rw-r--r--  drivers/misc/habanalabs/common/sysfs.c | 20
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 716
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 19
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 5
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_security.c | 8
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 102
-rw-r--r--  drivers/misc/habanalabs/include/common/cpucp_if.h | 115
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h | 62
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 3
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_masks.h | 17
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.h | 1
-rw-r--r--  drivers/misc/lkdtm/core.c | 10
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 28
-rw-r--r--  drivers/misc/pci_endpoint_test.c | 9
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 4
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 2
-rw-r--r--  drivers/mtd/Kconfig | 10
-rw-r--r--  drivers/mtd/ftl.c | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 23
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/pmcmsp-flash.c | 227
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 60
-rw-r--r--  drivers/mtd/mtdblock.c | 4
-rw-r--r--  drivers/mtd/mtdblock_ro.c | 4
-rw-r--r--  drivers/mtd/mtdconcat.c | 33
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 4
-rw-r--r--  drivers/mtd/nand/raw/cafe_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/intel-nand-controller.c | 29
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/nand_bbt.c | 33
-rw-r--r--  drivers/mtd/nand/raw/omap2.c | 2
-rw-r--r--  drivers/mtd/nand/spi/core.c | 4
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 16
-rw-r--r--  drivers/mtd/rfd_ftl.c | 46
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 17
-rw-r--r--  drivers/net/can/c_can/c_can_ethtool.c | 4
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 34
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 1
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 91
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 51
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 115
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 85
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/lasi_82596.c | 3
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 42
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 78
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 79
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 46
-rw-r--r--  drivers/net/ethernet/xscale/ptp_ixp46x.c | 1
-rw-r--r--  drivers/net/phy/phylink.c | 82
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 5
-rw-r--r--  drivers/net/usb/hso.c | 11
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 1
-rw-r--r--  drivers/net/wireless/intersil/orinoco/hermes.c | 1
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem.h | 1
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mmio.c | 30
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.c | 12
-rw-r--r--  drivers/ntb/hw/idt/ntb_hw_idt.c | 15
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_gen1.c | 12
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.h | 3
-rw-r--r--  drivers/ntb/test/ntb_msi_test.c | 4
-rw-r--r--  drivers/ntb/test/ntb_perf.c | 1
-rw-r--r--  drivers/ntb/test/ntb_pingpong.c | 2
-rw-r--r--  drivers/nvdimm/label.c | 256
-rw-r--r--  drivers/nvdimm/label.h | 1
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 113
-rw-r--r--  drivers/nvdimm/nd.h | 150
-rw-r--r--  drivers/nvdimm/pmem.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 68
-rw-r--r--  drivers/nvme/host/multipath.c | 19
-rw-r--r--  drivers/nvme/host/nvme.h | 10
-rw-r--r--  drivers/nvme/host/tcp.c | 22
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 2
-rw-r--r--  drivers/nvme/target/configfs.c | 5
-rw-r--r--  drivers/nvme/target/core.c | 10
-rw-r--r--  drivers/nvme/target/nvmet.h | 11
-rw-r--r--  drivers/nvme/target/passthru.c | 14
-rw-r--r--  drivers/of/device.c | 40
-rw-r--r--  drivers/of/of_reserved_mem.c | 12
-rw-r--r--  drivers/of/property.c | 3
-rw-r--r--  drivers/parisc/ccio-dma.c | 96
-rw-r--r--  drivers/parisc/dino.c | 18
-rw-r--r--  drivers/parisc/led.c | 4
-rw-r--r--  drivers/parisc/sba_iommu.c | 2
-rw-r--r--  drivers/parport/parport_gsc.c | 8
-rw-r--r--  drivers/pci/ats.c | 2
-rw-r--r--  drivers/pci/controller/Kconfig | 1
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 61
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 200
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 3
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.c | 16
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 29
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 48
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 3
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 7
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 36
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 9
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 279
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c | 460
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 54
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c | 332
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 15
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 334
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 153
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 38
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 10
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 10
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 10
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 9
-rw-r--r--  drivers/pci/controller/pcie-iproc-bcma.c | 16
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 4
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 13
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 64
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 18
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c | 23
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 94
-rw-r--r--  drivers/pci/controller/pcie-rcar.h | 7
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 18
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 8
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 4
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 25
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 9
-rw-r--r--  drivers/pci/controller/vmd.c | 55
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c | 89
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 74
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 24
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 134
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 146
-rw-r--r--  drivers/pci/host-bridge.c | 1
-rw-r--r--  drivers/pci/hotplug/TODO | 3
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 2
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 2
-rw-r--r--  drivers/pci/hotplug/pnv_php.c | 2
-rw-r--r--  drivers/pci/of.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 159
-rw-r--r--  drivers/pci/pci-bridge-emul.h | 2
-rw-r--r--  drivers/pci/pci-driver.c | 28
-rw-r--r--  drivers/pci/pci-sysfs.c | 3
-rw-r--r--  drivers/pci/pci.c | 331
-rw-r--r--  drivers/pci/pci.h | 47
-rw-r--r--  drivers/pci/pcie/aer.c | 12
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 9
-rw-r--r--  drivers/pci/pcie/ptm.c | 4
-rw-r--r--  drivers/pci/probe.c | 29
-rw-r--r--  drivers/pci/proc.c | 1
-rw-r--r--  drivers/pci/quirks.c | 128
-rw-r--r--  drivers/pci/remove.c | 1
-rw-r--r--  drivers/pci/syscall.c | 7
-rw-r--r--  drivers/pci/vpd.c | 490
-rw-r--r--  drivers/pci/xen-pcifront.c | 32
-rw-r--r--  drivers/phy/Kconfig | 2
-rw-r--r--  drivers/phy/st/phy-stm32-usbphyc.c | 2
-rw-r--r--  drivers/pinctrl/Kconfig | 24
-rw-r--r--  drivers/pinctrl/Makefile | 1
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 4
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.c | 3
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.h | 1
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 3
-rw-r--r--  drivers/pinctrl/freescale/Kconfig | 7
-rw-r--r--  drivers/pinctrl/freescale/Makefile | 1
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx8dxl.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx8mn.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx8qxp.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx8ulp.c | 278
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8365.c | 1
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 16
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 545
-rw-r--r--  drivers/pinctrl/pinctrl-keembay.c | 1731
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-stmfx.c | 6
-rw-r--r--  drivers/pinctrl/pinctrl-zynq.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-zynqmp.c | 10
-rw-r--r--  drivers/pinctrl/qcom/Kconfig | 17
-rw-r--r--  drivers/pinctrl/qcom/Makefile | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-mdm9607.c | 1087
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sm6115.c | 923
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 37
-rw-r--r--  drivers/pinctrl/renesas/Kconfig | 11
-rw-r--r--  drivers/pinctrl/renesas/Makefile | 1
-rw-r--r--  drivers/pinctrl/renesas/core.c | 29
-rw-r--r--  drivers/pinctrl/renesas/pfc-r8a77995.c | 320
-rw-r--r--  drivers/pinctrl/renesas/pinctrl-rzg2l.c | 1175
-rw-r--r--  drivers/pinctrl/renesas/pinctrl.c | 16
-rw-r--r--  drivers/pinctrl/renesas/sh_pfc.h | 7
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | 116
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 29
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 4
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.h | 1
-rw-r--r--  drivers/pinctrl/stm32/Kconfig | 6
-rw-r--r--  drivers/pinctrl/stm32/Makefile | 1
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32mp135.c | 1679
-rw-r--r--  drivers/platform/chrome/Makefile | 2
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 9
-rw-r--r--  drivers/platform/chrome/cros_ec_sensorhub_ring.c | 14
-rw-r--r--  drivers/platform/chrome/cros_ec_trace.h | 94
-rw-r--r--  drivers/platform/chrome/cros_ec_typec.c | 27
-rw-r--r--  drivers/platform/mellanox/mlxbf-pmc.c | 13
-rw-r--r--  drivers/platform/surface/aggregator/Makefile | 15
-rw-r--r--  drivers/platform/surface/surface3_power.c | 8
-rw-r--r--  drivers/platform/x86/Kconfig | 291
-rw-r--r--  drivers/platform/x86/Makefile | 29
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 179
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 416
-rw-r--r--  drivers/platform/x86/dell/Kconfig | 2
-rw-r--r--  drivers/platform/x86/dell/dcdbas.c | 4
-rw-r--r--  drivers/platform/x86/dell/dell-smbios-smm.c | 31
-rw-r--r--  drivers/platform/x86/dell/dell-smbios-wmi.c | 3
-rw-r--r--  drivers/platform/x86/dell/dell-smo8800.c | 74
-rw-r--r--  drivers/platform/x86/dual_accel_detect.h | 26
-rw-r--r--  drivers/platform/x86/hp_accel.c | 78
-rw-r--r--  drivers/platform/x86/i2c-multi-instantiate.c | 27
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 9
-rw-r--r--  drivers/platform/x86/intel/Kconfig | 149
-rw-r--r--  drivers/platform/x86/intel/Makefile | 39
-rw-r--r--  drivers/platform/x86/intel/atomisp2/Kconfig | 43
-rw-r--r--  drivers/platform/x86/intel/atomisp2/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/atomisp2/led.c (renamed from drivers/platform/x86/intel_atomisp2_led.c) | 0
-rw-r--r--  drivers/platform/x86/intel/atomisp2/pm.c (renamed from drivers/platform/x86/intel_atomisp2_pm.c) | 0
-rw-r--r--  drivers/platform/x86/intel/bxtwc_tmu.c (renamed from drivers/platform/x86/intel_bxtwc_tmu.c) | 0
-rw-r--r--  drivers/platform/x86/intel/chtdc_ti_pwrbtn.c (renamed from drivers/platform/x86/intel_chtdc_ti_pwrbtn.c) | 0
-rw-r--r--  drivers/platform/x86/intel/hid.c (renamed from drivers/platform/x86/intel-hid.c) | 2
-rw-r--r--  drivers/platform/x86/intel/int0002_vgpio.c (renamed from drivers/platform/x86/intel_int0002_vgpio.c) | 0
-rw-r--r--  drivers/platform/x86/intel/int1092/Kconfig | 14
-rw-r--r--  drivers/platform/x86/intel/int1092/Makefile | 1
-rw-r--r--  drivers/platform/x86/intel/int1092/intel_sar.c | 316
-rw-r--r--  drivers/platform/x86/intel/int1092/intel_sar.h | 86
-rw-r--r--  drivers/platform/x86/intel/int33fe/Makefile | 2
-rw-r--r--  drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c | 29
-rw-r--r--  drivers/platform/x86/intel/int3472/Makefile | 2
-rw-r--r--  drivers/platform/x86/intel/mrfld_pwrbtn.c (renamed from drivers/platform/x86/intel_mrfld_pwrbtn.c) | 0
-rw-r--r--  drivers/platform/x86/intel/oaktrail.c (renamed from drivers/platform/x86/intel_oaktrail.c) | 0
-rw-r--r--  drivers/platform/x86/intel/pmc/Kconfig | 25
-rw-r--r--  drivers/platform/x86/intel/pmc/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c (renamed from drivers/platform/x86/intel_pmc_core.c) | 374
-rw-r--r--  drivers/platform/x86/intel/pmc/core.h (renamed from drivers/platform/x86/intel_pmc_core.h) | 19
-rw-r--r--  drivers/platform/x86/intel/pmc/pltdrv.c (renamed from drivers/platform/x86/intel_pmc_core_pltdrv.c) | 0
-rw-r--r--  drivers/platform/x86/intel/pmt/Kconfig | 40
-rw-r--r--  drivers/platform/x86/intel/pmt/Makefile | 12
-rw-r--r--  drivers/platform/x86/intel/pmt/class.c (renamed from drivers/platform/x86/intel_pmt_class.c) | 2
-rw-r--r--  drivers/platform/x86/intel/pmt/class.h (renamed from drivers/platform/x86/intel_pmt_class.h) | 0
-rw-r--r--  drivers/platform/x86/intel/pmt/crashlog.c (renamed from drivers/platform/x86/intel_pmt_crashlog.c) | 2
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.c (renamed from drivers/platform/x86/intel_pmt_telemetry.c) | 10
-rw-r--r--  drivers/platform/x86/intel/punit_ipc.c (renamed from drivers/platform/x86/intel_punit_ipc.c) | 0
-rw-r--r--  drivers/platform/x86/intel/rst.c (renamed from drivers/platform/x86/intel-rst.c) | 0
-rw-r--r--  drivers/platform/x86/intel/smartconnect.c (renamed from drivers/platform/x86/intel-smartconnect.c) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/Kconfig (renamed from drivers/platform/x86/intel_speed_select_if/Kconfig) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/Makefile (renamed from drivers/platform/x86/intel_speed_select_if/Makefile) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c (renamed from drivers/platform/x86/intel_speed_select_if/isst_if_common.c) | 9
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.h (renamed from drivers/platform/x86/intel_speed_select_if/isst_if_common.h) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c (renamed from drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c (renamed from drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c) | 0
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c (renamed from drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c) | 0
-rw-r--r--  drivers/platform/x86/intel/telemetry/Kconfig | 16
-rw-r--r--  drivers/platform/x86/intel/telemetry/Makefile | 11
-rw-r--r--  drivers/platform/x86/intel/telemetry/core.c (renamed from drivers/platform/x86/intel_telemetry_core.c) | 0
-rw-r--r--  drivers/platform/x86/intel/telemetry/debugfs.c (renamed from drivers/platform/x86/intel_telemetry_debugfs.c) | 0
-rw-r--r--  drivers/platform/x86/intel/telemetry/pltdrv.c (renamed from drivers/platform/x86/intel_telemetry_pltdrv.c) | 0
-rw-r--r--  drivers/platform/x86/intel/turbo_max_3.c (renamed from drivers/platform/x86/intel_turbo_max_3.c) | 0
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency.c (renamed from drivers/platform/x86/intel-uncore-frequency.c) | 0
-rw-r--r--  drivers/platform/x86/intel/vbtn.c (renamed from drivers/platform/x86/intel-vbtn.c) | 2
-rw-r--r--  drivers/platform/x86/intel/wmi/Kconfig | 31
-rw-r--r--  drivers/platform/x86/intel/wmi/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/wmi/sbl-fw-update.c (renamed from drivers/platform/x86/intel-wmi-sbl-fw-update.c) | 0
-rw-r--r--  drivers/platform/x86/intel/wmi/thunderbolt.c (renamed from drivers/platform/x86/intel-wmi-thunderbolt.c) | 0
-rw-r--r--  drivers/platform/x86/intel_scu_ipc.c | 2
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 107
-rw-r--r--  drivers/platform/x86/meraki-mx100.c | 230
-rw-r--r--  drivers/platform/x86/think-lmi.c | 80
-rw-r--r--  drivers/platform/x86/think-lmi.h | 1
-rw-r--r--  drivers/pwm/Kconfig | 5
-rw-r--r--  drivers/pwm/core.c | 4
-rw-r--r--  drivers/pwm/pwm-ab8500.c | 35
-rw-r--r--  drivers/pwm/pwm-atmel-hlcdc.c | 5
-rw-r--r--  drivers/pwm/pwm-atmel-tcb.c | 5
-rw-r--r--  drivers/pwm/pwm-atmel.c | 102
-rw-r--r--  drivers/pwm/pwm-bcm-kona.c | 12
-rw-r--r--  drivers/pwm/pwm-brcmstb.c | 5
-rw-r--r--  drivers/pwm/pwm-cros-ec.c | 4
-rw-r--r--  drivers/pwm/pwm-ep93xx.c | 11
-rw-r--r--  drivers/pwm/pwm-fsl-ftm.c | 10
-rw-r--r--  drivers/pwm/pwm-hibvt.c | 4
-rw-r--r--  drivers/pwm/pwm-img.c | 20
-rw-r--r--  drivers/pwm/pwm-imx-tpm.c | 5
-rw-r--r--  drivers/pwm/pwm-imx27.c | 14
-rw-r--r--  drivers/pwm/pwm-intel-lgm.c | 12
-rw-r--r--  drivers/pwm/pwm-iqs620a.c | 16
-rw-r--r--  drivers/pwm/pwm-jz4740.c | 12
-rw-r--r--  drivers/pwm/pwm-keembay.c | 12
-rw-r--r--  drivers/pwm/pwm-lp3943.c | 12
-rw-r--r--  drivers/pwm/pwm-lpc32xx.c | 22
-rw-r--r--  drivers/pwm/pwm-mediatek.c | 12
-rw-r--r--  drivers/pwm/pwm-mtk-disp.c | 174
-rw-r--r--  drivers/pwm/pwm-mxs.c | 25
-rw-r--r--  drivers/pwm/pwm-ntxec.c | 14
-rw-r--r--  drivers/pwm/pwm-omap-dmtimer.c | 5
-rw-r--r--  drivers/pwm/pwm-pca9685.c | 5
-rw-r--r--  drivers/pwm/pwm-pxa.c | 13
-rw-r--r--  drivers/pwm/pwm-raspberrypi-poe.c | 12
-rw-r--r--  drivers/pwm/pwm-rcar.c | 5
-rw-r--r--  drivers/pwm/pwm-renesas-tpu.c | 5
-rw-r--r--  drivers/pwm/pwm-rockchip.c | 16
-rw-r--r--  drivers/pwm/pwm-samsung.c | 5
-rw-r--r--  drivers/pwm/pwm-sifive.c | 6
-rw-r--r--  drivers/pwm/pwm-sl28cpld.c | 12
-rw-r--r--  drivers/pwm/pwm-stm32-lp.c | 12
-rw-r--r--  drivers/pwm/pwm-sun4i.c | 5
-rw-r--r--  drivers/pwm/pwm-tiecap.c | 6
-rw-r--r--  drivers/pwm/pwm-tiehrpwm.c | 4
-rw-r--r--  drivers/pwm/pwm-twl-led.c | 17
-rw-r--r--  drivers/pwm/pwm-twl.c | 17
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c | 1
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c | 49
-rw-r--r--  drivers/remoteproc/qcom_wcnss.h | 4
-rw-r--r--  drivers/remoteproc/qcom_wcnss_iris.c | 120
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 4
-rw-r--r--  drivers/remoteproc/remoteproc_elf_helpers.h | 2
-rw-r--r--  drivers/rtc/Kconfig | 10
-rw-r--r--  drivers/rtc/Makefile | 2
-rw-r--r--  drivers/rtc/lib.c | 107
-rw-r--r--  drivers/rtc/lib_test.c | 81
-rw-r--r--  drivers/rtc/rtc-cmos.c | 8
-rw-r--r--  drivers/rtc/rtc-rx8025.c | 46
-rw-r--r--  drivers/rtc/rtc-s5m.c | 48
-rw-r--r--  drivers/rtc/rtc-tps65910.c | 2
-rw-r--r--  drivers/s390/block/Kconfig | 11
-rw-r--r--  drivers/s390/block/Makefile | 1
-rw-r--r--  drivers/s390/block/xpram.c | 416
-rw-r--r--  drivers/s390/char/con3270.c | 7
-rw-r--r--  drivers/s390/char/ctrlchar.c | 11
-rw-r--r--  drivers/s390/char/hmcdrv_ftp.c | 2
-rw-r--r--  drivers/s390/char/sclp.c | 2
-rw-r--r--  drivers/s390/cio/blacklist.c | 5
-rw-r--r--  drivers/s390/cio/device.c | 21
-rw-r--r--  drivers/s390/cio/device_id.c | 2
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 8
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c | 282
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 14
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 8
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c | 10
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 8
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 18
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 38
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 4
-rw-r--r--  drivers/scsi/53c700.c | 2
-rw-r--r--  drivers/scsi/BusLogic.c | 8
-rw-r--r--  drivers/scsi/Kconfig | 18
-rw-r--r--  drivers/scsi/Makefile | 9
-rw-r--r--  drivers/scsi/NCR5380.c | 6
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 3
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 4
-rw-r--r--  drivers/scsi/aha1542.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 84
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/ch.c | 73
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 6
-rw-r--r--  drivers/scsi/cxlflash/main.c | 36
-rw-r--r--  drivers/scsi/dpt_i2o.c | 4
-rw-r--r--  drivers/scsi/elx/efct/efct_driver.h | 1
-rw-r--r--  drivers/scsi/elx/efct/efct_lio.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 51
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2
-rw-r--r--  drivers/scsi/hpsa.c | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 15
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ips.c | 2
-rw-r--r--  drivers/scsi/isci/request.c | 4
-rw-r--r--  drivers/scsi/lasi700.c | 4
-rw-r--r--  drivers/scsi/libsas/Kconfig | 1
-rw-r--r--  drivers/scsi/libsas/Makefile | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 253
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 247
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 89
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 27
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 223
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1193
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 221
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 275
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 1495
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 54
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 396
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 966
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 10
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 19
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 37
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 76
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 317
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 40
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 37
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 24
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 160
-rw-r--r--  drivers/scsi/mvumi.c | 2
-rw-r--r--  drivers/scsi/myrb.c | 11
-rw-r--r--  drivers/scsi/myrs.c | 11
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 203
-rw-r--r--  drivers/scsi/pcmcia/fdomain_cs.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 8
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 10
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 23
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 36
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h | 1
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 14
-rw-r--r--  drivers/scsi/qla1280.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 42
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 90
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 221
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.c | 3461
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.h | 136
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif_bsg.h | 220
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 51
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 347
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 163
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 357
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 59
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 49
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 77
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 257
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 196
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 30
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 10
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 14
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_bsg.c | 106
-rw-r--r--  drivers/scsi/scsi_common.c | 9
-rw-r--r--  drivers/scsi/scsi_debug.c | 125
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 16
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 851
-rw-r--r--  drivers/scsi/scsi_lib.c | 37
-rw-r--r--  drivers/scsi/scsi_logging.c | 18
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 24
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 125
-rw-r--r--  drivers/scsi/sd_zbc.c | 10
-rw-r--r--  drivers/scsi/sg.c | 33
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 8
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 8
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 72
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 4
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 14
-rw-r--r--  drivers/scsi/sr.c | 143
-rw-r--r--  drivers/scsi/st.c | 73
-rw-r--r--  drivers/scsi/stex.c | 6
-rw-r--r--  drivers/scsi/storvsc_drv.c | 4
-rw-r--r--  drivers/scsi/sun3_scsi.c | 5
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/ufs/Kconfig | 16
-rw-r--r--  drivers/scsi/ufs/Makefile | 2
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 7
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pci.c | 32
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pltfrm.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 11
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.h | 2
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.c | 70
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.h | 24
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 7
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 25
-rw-r--r--  drivers/scsi/ufs/ufs.h | 54
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 48
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 47
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.h | 18
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 634
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 107
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 1
-rw-r--r--  drivers/scsi/ufs/ufshpb.c | 2933
-rw-r--r--  drivers/scsi/ufs/ufshpb.h | 323
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/scsi/wd719x.c | 8
-rw-r--r--  drivers/scsi/xen-scsifront.c | 2
-rw-r--r--  drivers/scsi/zalon.c | 4
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h | 2
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h | 2
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_env.h | 2
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h | 2
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_internal.h | 2
-rw-r--r--  drivers/target/Kconfig | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 2
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 8
-rw-r--r--  drivers/target/sbp/sbp_target.c | 4
-rw-r--r--  drivers/target/target_core_alua.c | 94
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_pscsi.c | 18
-rw-r--r--  drivers/target/target_core_transport.c | 48
-rw-r--r--  drivers/target/target_core_user.c | 150
-rw-r--r--  drivers/target/target_core_xcopy.c | 26
-rw-r--r--  drivers/thermal/devfreq_cooling.c | 2
-rw-r--r--  drivers/thermal/intel/Kconfig | 9
-rw-r--r--  drivers/thermal/intel/Makefile | 1
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 14
-rw-r--r--  drivers/thermal/intel/intel_menlow.c (renamed from drivers/platform/x86/intel_menlow.c) | 0
-rw-r--r--  drivers/thermal/intel/intel_powerclamp.c | 4
-rw-r--r--  drivers/thermal/intel/intel_tcc_cooling.c | 2
-rw-r--r--  drivers/thermal/qcom/Kconfig | 10
-rw-r--r--  drivers/thermal/qcom/Makefile | 1
-rw-r--r--  drivers/thermal/qcom/lmh.c | 232
-rw-r--r--  drivers/thermal/qcom/qcom-spmi-adc-tm5.c | 6
-rw-r--r--  drivers/thermal/rcar_gen3_thermal.c | 110
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c | 1
-rw-r--r--  drivers/thermal/tegra/Kconfig | 9
-rw-r--r--  drivers/thermal/tegra/Makefile | 1
-rw-r--r--  drivers/thermal/tegra/soctherm.c | 4
-rw-r--r--  drivers/thermal/tegra/tegra30-tsensor.c | 673
-rw-r--r--  drivers/thunderbolt/test.c | 98
-rw-r--r--  drivers/tty/serial/mux.c | 3
-rw-r--r--  drivers/usb/serial/cp210x.c | 77
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 2
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 33
-rw-r--r--  drivers/usb/serial/io_ti.c | 4
-rw-r--r--  drivers/usb/serial/ipaq.c | 4
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 10
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/sierra.c | 2
-rw-r--r--  drivers/usb/storage/transport.c | 2
-rw-r--r--  drivers/vdpa/Kconfig | 11
-rw-r--r--  drivers/vdpa/Makefile | 1
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c | 8
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h | 25
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c | 249
-rw-r--r--  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 26
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c | 81
-rw-r--r--  drivers/vdpa/mlx5/core/resources.c | 35
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 555
-rw-r--r--  drivers/vdpa/vdpa.c | 9
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c | 29
-rw-r--r--  drivers/vdpa/vdpa_user/Makefile | 5
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.c | 545
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.h | 73
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c | 1641
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c | 17
-rw-r--r--  drivers/vfio/Kconfig | 31
-rw-r--r--  drivers/vfio/fsl-mc/Kconfig | 3
-rw-r--r--  drivers/vfio/fsl-mc/vfio_fsl_mc.c | 159
-rw-r--r--  drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 6
-rw-r--r--  drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 7
-rw-r--r--  drivers/vfio/mdev/Kconfig | 1
-rw-r--r--  drivers/vfio/mdev/mdev_core.c | 6
-rw-r--r--  drivers/vfio/mdev/vfio_mdev.c | 33
-rw-r--r--  drivers/vfio/pci/Kconfig | 40
-rw-r--r--  drivers/vfio/pci/Makefile | 8
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 2270
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 70
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c | 2157
-rw-r--r--  drivers/vfio/pci/vfio_pci_igd.c | 23
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 42
-rw-r--r--  drivers/vfio/pci/vfio_pci_private.h | 215
-rw-r--r--  drivers/vfio/pci/vfio_pci_rdwr.c | 18
-rw-r--r--  drivers/vfio/pci/vfio_pci_zdev.c | 11
-rw-r--r--  drivers/vfio/platform/Kconfig | 6
-rw-r--r--  drivers/vfio/platform/reset/Kconfig | 4
-rw-r--r--  drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c | 10
-rw-r--r--  drivers/vfio/platform/vfio_platform_common.c | 86
-rw-r--r--  drivers/vfio/platform/vfio_platform_private.h | 1
-rw-r--r--  drivers/vfio/vfio.c | 144
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 8
-rw-r--r--  drivers/vhost/iotlb.c | 20
-rw-r--r--  drivers/vhost/scsi.c | 14
-rw-r--r--  drivers/vhost/vdpa.c | 188
-rw-r--r--  drivers/vhost/vsock.c | 28
-rw-r--r--  drivers/video/backlight/ktd253-backlight.c | 75
-rw-r--r--drivers/video/backlight/pwm_bl.c54
-rw-r--r--drivers/video/fbdev/core/fbmem.c6
-rw-r--r--drivers/virtio/virtio.c56
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/virtio/virtio_mem.c26
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c10
-rw-r--r--drivers/watchdog/bd70528_wdt.c291
-rw-r--r--drivers/watchdog/iTCO_wdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c24
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c6
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c2
-rw-r--r--drivers/watchdog/tqmx86_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c37
-rw-r--r--drivers/watchdog/watchdog_dev.c57
-rw-r--r--drivers/xen/features.c18
-rw-r--r--drivers/xen/gntdev.c36
-rw-r--r--drivers/xen/swiotlb-xen.c10
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
1308 files changed, 74407 insertions, 22581 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 894b7e6ae144..30b1f511c2af 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -385,7 +385,9 @@ static struct platform_device *lpss_clk_dev;
static inline void lpt_register_clock_device(void)
{
- lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
+ lpss_clk_dev = platform_device_register_simple("clk-lpss-atom",
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
}
static int register_device_clock(struct acpi_device *adev,
@@ -434,8 +436,8 @@ static int register_device_clock(struct acpi_device *adev,
if (!clk_name)
return -ENOMEM;
clk = clk_register_fractional_divider(NULL, clk_name, parent,
- 0, prv_base,
- 1, 15, 16, 15, 0, NULL);
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+ prv_base, 1, 15, 16, 15, 0, NULL);
parent = clk_name;
clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
@@ -1337,7 +1339,7 @@ void __init acpi_lpss_init(void)
const struct x86_cpu_id *id;
int ret;
- ret = lpt_clk_init();
+ ret = lpss_atom_clk_init();
if (ret)
return;
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 8cc195c4c861..24f662d8bd39 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,6 +54,7 @@ struct acpi_memory_info {
struct acpi_memory_device {
struct acpi_device *device;
struct list_head res_list;
+ int mgid;
};
static acpi_status
@@ -169,12 +170,33 @@ static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
acpi_handle handle = mem_device->device->handle;
+ mhp_t mhp_flags = MHP_NID_IS_MGID;
int result, num_enabled = 0;
struct acpi_memory_info *info;
- mhp_t mhp_flags = MHP_NONE;
- int node;
+ u64 total_length = 0;
+ int node, mgid;
node = acpi_get_node(handle);
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (!info->length)
+ continue;
+ /* We want a single node for the whole memory group */
+ if (node < 0)
+ node = memory_add_physaddr_to_nid(info->start_addr);
+ total_length += info->length;
+ }
+
+ if (!total_length) {
+ dev_err(&mem_device->device->dev, "device is empty\n");
+ return -EINVAL;
+ }
+
+ mgid = memory_group_register_static(node, PFN_UP(total_length));
+ if (mgid < 0)
+ return mgid;
+ mem_device->mgid = mgid;
+
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
@@ -182,22 +204,16 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* (i.e. memory-hot-remove function)
*/
list_for_each_entry(info, &mem_device->res_list, list) {
- if (info->enabled) { /* just sanity check...*/
- num_enabled++;
- continue;
- }
/*
* If the memory block size is zero, please ignore it.
* Don't try to do the following memory hotplug flowchart.
*/
if (!info->length)
continue;
- if (node < 0)
- node = memory_add_physaddr_to_nid(info->start_addr);
if (mhp_supports_memmap_on_memory(info->length))
mhp_flags |= MHP_MEMMAP_ON_MEMORY;
- result = __add_memory(node, info->start_addr, info->length,
+ result = __add_memory(mgid, info->start_addr, info->length,
mhp_flags);
/*
@@ -239,19 +255,14 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- acpi_handle handle = mem_device->device->handle;
struct acpi_memory_info *info, *n;
- int nid = acpi_get_node(handle);
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (!info->enabled)
continue;
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(info->start_addr);
-
acpi_unbind_memory_blocks(info);
- __remove_memory(nid, info->start_addr, info->length);
+ __remove_memory(info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
@@ -262,6 +273,10 @@ static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
if (!mem_device)
return;
+ /* If we succeeded in adding *some* memory and it is still present, unregistering will fail. */
+ if (mem_device->mgid >= 0)
+ memory_group_unregister(mem_device->mgid);
+
acpi_memory_free_device_resources(mem_device);
mem_device->device->driver_data = NULL;
kfree(mem_device);
@@ -282,6 +297,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&mem_device->res_list);
mem_device->device = device;
+ mem_device->mgid = -1;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
device->driver_data = mem_device;
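The hunk above changes the __add_memory() contract: with MHP_NID_IS_MGID set
in mhp_flags, the first argument carries a memory group id rather than a node
id, and __remove_memory() no longer takes a nid at all. A minimal sketch of
the resulting driver-side flow, using only calls visible in this diff; nid,
total_length, start_addr and length stand in for the driver's own bookkeeping:

	/* One static memory group per hotpluggable device. */
	int mgid = memory_group_register_static(nid, PFN_UP(total_length));

	if (mgid < 0)
		return mgid;
	/* MHP_NID_IS_MGID: "mgid" is a group id, not a node id. */
	ret = __add_memory(mgid, start_addr, length, MHP_NID_IS_MGID);

	/* Later, when the device goes away, removal is purely range-based. */
	__remove_memory(start_addr, length);
	/* Fails with -EBUSY while any block still belongs to the group. */
	memory_group_unregister(mgid);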
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a4d4eebba1da..bd482108310c 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1008,23 +1008,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-/**
- * cppc_get_desired_perf - Get the value of desired performance register.
- * @cpunum: CPU from which to get desired performance.
- * @desired_perf: address of a variable to store the returned desired performance
- *
- * Return: 0 for success, -EIO otherwise.
- */
-int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cpc_register_resource *desired_reg;
- struct cppc_pcc_data *pcc_ss_data = NULL;
-
- desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+ struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (pcc_ss_id < 0)
@@ -1035,7 +1026,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
down_write(&pcc_ss_data->pcc_lock);
if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
else
ret = -EIO;
@@ -1044,13 +1035,37 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
return ret;
}
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
return 0;
}
+
+/**
+ * cppc_get_desired_perf - Get the desired performance register value.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: Address of the variable in which to return the value.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
+ * cppc_get_nominal_perf - Get the nominal performance register value.
+ * @cpunum: CPU from which to get nominal performance.
+ * @nominal_perf: Address of the variable in which to return the value.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+}
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
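cppc_get_desired_perf() and the new cppc_get_nominal_perf() are now thin
wrappers around the shared cppc_get_perf() helper above. A caller sketch; the
cpu variable and the print are illustrative, not part of the patch:

	u64 desired, nominal;

	if (!cppc_get_desired_perf(cpu, &desired) &&
	    !cppc_get_nominal_perf(cpu, &nominal))
		pr_info("CPU%d: desired perf %llu of nominal %llu\n",
			cpu, desired, nominal);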
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 1f6007abcf18..89c22bc55057 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -288,10 +288,18 @@ invalid_guid:
void __init init_prmt(void)
{
+ struct acpi_table_header *tbl;
acpi_status status;
- int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
+ int mc;
+
+ status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ return;
+
+ mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
+ acpi_put_table(tbl);
/*
* Return immediately if PRMT table is not present or no PRM module found.
*/
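The fix above brackets the parse with an acpi_get_table()/acpi_put_table()
pair so the mapped PRMT stays referenced while acpi_table_parse_entries()
walks its entries. The same get/put pattern applies to any ACPI table user;
a sketch:

	struct acpi_table_header *tbl;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);	/* take a reference */
	if (ACPI_FAILURE(status))
		return;
	/* ... the table mapping is guaranteed to stay valid here ... */
	acpi_put_table(tbl);					/* drop the reference */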
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b24513ec3fae..5b54c80b9d32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -16,7 +16,6 @@
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
-#include <linux/nls.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a37a1532a575..f9383736fa0f 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -583,8 +583,8 @@ void __init acpi_table_upgrade(void)
}
acpi_tables_addr =
- memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
- all_tables_size, PAGE_SIZE);
+ memblock_phys_alloc_range(all_tables_size, PAGE_SIZE,
+ 0, ACPI_TABLE_UPGRADE_MAX_PHYS);
if (!acpi_tables_addr) {
WARN_ON(1);
return;
@@ -599,7 +599,6 @@ void __init acpi_table_upgrade(void)
* Both memblock_reserve and e820__range_add (via arch_reserve_mem_area)
* works fine.
*/
- memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
/*
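memblock_phys_alloc_range() collapses the old find-then-reserve pair into one
call, closing the window in which a concurrent allocation could claim the
found range. The shape of the conversion; LIMIT is a placeholder for the
caller's upper bound:

	/* Before: find a candidate range, then reserve it separately. */
	phys = memblock_find_in_range(0, LIMIT, size, PAGE_SIZE);
	if (phys)
		memblock_reserve(phys, size);

	/* After: find and reserve in a single step. */
	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0, LIMIT);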
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 3a308461246a..bd92b549fd5a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -449,25 +449,30 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
- if (lps0_dsm_func_mask_microsoft > 0) {
+ /* Screen off */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_OFF_AMD :
+ ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* LPS0 entry */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_ENTRY_AMD :
+ ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
- lps0_dsm_func_mask, lps0_dsm_guid);
+ /* Modern standby entry */
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
-
return 0;
}
@@ -476,24 +481,30 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
- if (lps0_dsm_func_mask_microsoft > 0) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ /* Modern standby exit */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
+
+ /* LPS0 exit */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_EXIT_AMD :
+ ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask, lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* Screen on */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask, lps0_dsm_guid);
- }
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_ON_AMD :
+ ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
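After this reorganization the vendor and Microsoft _DSM paths are no longer
mutually exclusive: when both function masks are set, the calls interleave.
The resulting order, read off the two hunks above:

	/*
	 * Suspend:  SCREEN_OFF (vendor GUID; AMD variant where applicable)
	 *        -> SCREEN_OFF (Microsoft GUID)
	 *        -> ENTRY (vendor GUID)
	 *        -> ENTRY (Microsoft GUID) -> MS_ENTRY (Microsoft GUID)
	 *
	 * Resume:   MS_EXIT (Microsoft GUID)
	 *        -> EXIT (vendor GUID) -> EXIT (Microsoft GUID)
	 *        -> SCREEN_ON (Microsoft GUID) -> SCREEN_ON (vendor GUID)
	 */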
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8459c54f739..eed65311b5d1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2186,6 +2186,25 @@ not_supported:
dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
+static bool ata_dev_check_adapter(struct ata_device *dev,
+ unsigned short vendor_id)
+{
+ struct pci_dev *pcidev = NULL;
+ struct device *parent_dev = NULL;
+
+ for (parent_dev = dev->tdev.parent; parent_dev != NULL;
+ parent_dev = parent_dev->parent) {
+ if (dev_is_pci(parent_dev)) {
+ pcidev = to_pci_dev(parent_dev);
+ if (pcidev->vendor == vendor_id)
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
@@ -2204,6 +2223,13 @@ static int ata_dev_config_ncq(struct ata_device *dev,
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
+ snprintf(desc, desc_sz, "NCQ (not used)");
+ return 0;
+ }
+
if (ap->flags & ATA_FLAG_NCQ) {
hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
dev->flags |= ATA_DFLAG_NCQ;
@@ -3970,6 +3996,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NO_NCQ_ON_ATI, },
+ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NO_NCQ_ON_ATI, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -6124,6 +6156,8 @@ static int __init ata_parse_force_one(char **cur,
{ "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
{ "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
{ "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
+ { "noncqati", .horkage_on = ATA_HORKAGE_NO_NCQ_ON_ATI },
+ { "ncqati", .horkage_off = ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
{ "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
{ "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bb3637762985..bf9c4b6c5c3d 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -912,7 +912,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
- blk_abort_request(qc->scsicmd->request);
+ blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}
/**
@@ -1893,8 +1893,7 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
*/
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
- if (qc->scsicmd &&
- qc->scsicmd->request->rq_flags & RQF_QUIET)
+ if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
return qc->flags & ATA_QCFLAG_QUIET;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0b7b4624e4df..1fb4611f7eeb 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -631,7 +631,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
{
struct ata_queued_cmd *qc;
- qc = ata_qc_new_init(dev, cmd->request->tag);
+ qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = cmd->scsi_done;
@@ -639,7 +639,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
- if (cmd->request->rq_flags & RQF_QUIET)
+ if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
} else {
cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
@@ -1496,7 +1496,7 @@ nothing_to_do:
static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
u32 req_blocks;
if (!blk_rq_is_passthrough(rq))
@@ -1531,7 +1531,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
u64 block;
@@ -3139,7 +3139,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
* as it modifies the DATA OUT buffer, which would corrupt user
* memory for SG_IO commands.
*/
- if (unlikely(blk_rq_is_passthrough(scmd->request)))
+ if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
goto invalid_opcode;
if (unlikely(scmd->cmd_len < 16)) {
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 9d0dd8f4c21c..121635aa8c00 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -48,8 +48,8 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
struct scsi_cmnd *cmd = qc->scsicmd;
bool swap = 1;
- if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
- !blk_rq_is_passthrough(cmd->request))
+ if (dev->class == ATA_DEV_ATA && cmd &&
+ !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
swap = 0;
/* Transfer multiple of 2 bytes */
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index fd430e6866a1..6526aa51fb1d 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -33,7 +33,7 @@
*/
static unsigned int cfag12864b_rate = CONFIG_CFAG12864B_RATE;
-module_param(cfag12864b_rate, uint, S_IRUGO);
+module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 24fd6f369ebe..304accde365c 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -637,9 +637,7 @@ static int panel_notify_sys(struct notifier_block *this, unsigned long code,
}
static struct notifier_block panel_notifier = {
- panel_notify_sys,
- NULL,
- 0
+ .notifier_call = panel_notify_sys,
};
int charlcd_register(struct charlcd *lcd)
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 2e5e7c993933..8b2a0eb3f32a 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
- kfree(lcd->drvdata);
charlcd_unregister(lcd);
+ kfree(lcd->drvdata);
kfree(lcd);
return 0;
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 03c95ad4216c..e871b94a1911 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -28,11 +28,11 @@
*/
static unsigned int ks0108_port = CONFIG_KS0108_PORT;
-module_param(ks0108_port, uint, S_IRUGO);
+module_param(ks0108_port, uint, 0444);
MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected");
static unsigned int ks0108_delay = CONFIG_KS0108_DELAY;
-module_param(ks0108_delay, uint, S_IRUGO);
+module_param(ks0108_delay, uint, 0444);
MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)");
/*
@@ -167,19 +167,7 @@ static struct parport_driver ks0108_parport_driver = {
.detach = ks0108_parport_detach,
.devmodel = true,
};
-
-static int __init ks0108_init(void)
-{
- return parport_register_driver(&ks0108_parport_driver);
-}
-
-static void __exit ks0108_exit(void)
-{
- parport_unregister_driver(&ks0108_parport_driver);
-}
-
-module_init(ks0108_init);
-module_exit(ks0108_exit);
+module_parport_driver(ks0108_parport_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 4cc4e117727d..46c503486e96 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -279,13 +279,10 @@ static int __init numa_alloc_distance(void)
int i, j;
size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
- size, PAGE_SIZE);
+ phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0, PFN_PHYS(max_pfn));
if (WARN_ON(!phys))
return -ENOMEM;
- memblock_reserve(phys, size);
-
numa_distance = __va(phys);
numa_distance_cnt = nr_node_ids;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 921312a8d957..43407665918f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -149,6 +149,7 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
@@ -165,6 +166,7 @@ void topology_set_thermal_pressure(const struct cpumask *cpus,
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
+EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 280425409b40..e65dd803a453 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -27,6 +27,7 @@
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
@@ -886,6 +887,8 @@ static void device_link_put_kref(struct device_link *link)
{
if (link->flags & DL_FLAG_STATELESS)
kref_put(&link->kref, __device_link_del);
+ else if (!device_is_registered(link->consumer))
+ __device_link_del(&link->kref);
else
WARN(1, "Unable to drop a managed device link reference\n");
}
@@ -2849,6 +2852,9 @@ void device_initialize(struct device *dev)
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
dev->dma_coherent = dma_default_coherent;
#endif
+#ifdef CONFIG_SWIOTLB
+ dev->dma_io_tlb_mem = &io_tlb_default_mem;
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index aa31a21f33d7..365cd4a7f239 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -82,6 +82,12 @@ static struct bus_type memory_subsys = {
*/
static DEFINE_XARRAY(memory_blocks);
+/*
+ * Memory groups, indexed by memory group id (mgid).
+ */
+static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
+#define MEMORY_GROUP_MARK_DYNAMIC XA_MARK_1
+
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
@@ -177,7 +183,8 @@ static int memory_block_online(struct memory_block *mem)
struct zone *zone;
int ret;
- zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
+ start_pfn, nr_pages);
/*
* Although vmemmap pages have a different lifecycle than the pages
@@ -193,7 +200,7 @@ static int memory_block_online(struct memory_block *mem)
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, zone);
+ nr_pages - nr_vmemmap_pages, zone, mem->group);
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
@@ -205,7 +212,8 @@ static int memory_block_online(struct memory_block *mem)
* now already properly populated.
*/
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ nr_vmemmap_pages);
return ret;
}
@@ -215,24 +223,23 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
- struct zone *zone;
int ret;
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
- if (nr_vmemmap_pages) {
- zone = page_zone(pfn_to_page(start_pfn));
- adjust_present_page_count(zone, -nr_vmemmap_pages);
- }
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ -nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages);
+ nr_pages - nr_vmemmap_pages, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn),
+ mem->group, nr_vmemmap_pages);
return ret;
}
@@ -374,12 +381,13 @@ static ssize_t phys_device_show(struct device *dev,
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
+ struct memory_group *group,
unsigned long start_pfn, unsigned long nr_pages,
int online_type, struct zone *default_zone)
{
struct zone *zone;
- zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
if (zone == default_zone)
return 0;
@@ -392,9 +400,10 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct memory_group *group = mem->group;
struct zone *default_zone;
+ int nid = mem->nid;
int len = 0;
- int nid;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -413,14 +422,13 @@ static ssize_t valid_zones_show(struct device *dev,
goto out;
}
- nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
- nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+ start_pfn, nr_pages);
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
out:
len += sysfs_emit_at(buf, len, "\n");
@@ -578,9 +586,9 @@ static struct memory_block *find_memory_block_by_id(unsigned long block_id)
/*
* Called under device_hotplug_lock.
*/
-struct memory_block *find_memory_block(struct mem_section *section)
+struct memory_block *find_memory_block(unsigned long section_nr)
{
- unsigned long block_id = memory_block_id(__section_nr(section));
+ unsigned long block_id = memory_block_id(section_nr);
return find_memory_block_by_id(block_id);
}
@@ -634,7 +642,8 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages)
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -652,6 +661,12 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->state = state;
mem->nid = NUMA_NO_NODE;
mem->nr_vmemmap_pages = nr_vmemmap_pages;
+ INIT_LIST_HEAD(&mem->group_next);
+
+ if (group) {
+ mem->group = group;
+ list_add(&mem->group_next, &group->memory_blocks);
+ }
ret = register_memory(mem);
@@ -671,7 +686,7 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0);
+ MEM_ONLINE, 0, NULL);
}
static void unregister_memory(struct memory_block *memory)
@@ -681,6 +696,11 @@ static void unregister_memory(struct memory_block *memory)
WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+ if (memory->group) {
+ list_del(&memory->group_next);
+ memory->group = NULL;
+ }
+
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
@@ -694,7 +714,8 @@ static void unregister_memory(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages)
+ unsigned long vmemmap_pages,
+ struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
@@ -707,7 +728,8 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
+ ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
+ group);
if (ret)
break;
}
@@ -891,3 +913,164 @@ int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
for_each_memory_block_cb);
}
+
+/*
+ * This is an internal helper to unify allocation and initialization of
+ * memory groups. Note that the passed memory group will be copied to a
+ * dynamically allocated memory group. After this call, the passed
+ * memory group should no longer be used.
+ */
+static int memory_group_register(struct memory_group group)
+{
+ struct memory_group *new_group;
+ uint32_t mgid;
+ int ret;
+
+ if (!node_possible(group.nid))
+ return -EINVAL;
+
+ new_group = kzalloc(sizeof(group), GFP_KERNEL);
+ if (!new_group)
+ return -ENOMEM;
+ *new_group = group;
+ INIT_LIST_HEAD(&new_group->memory_blocks);
+
+ ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(new_group);
+ return ret;
+ } else if (group.is_dynamic) {
+ xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
+ }
+ return mgid;
+}
+
+/**
+ * memory_group_register_static() - Register a static memory group.
+ * @nid: The node id.
+ * @max_pages: The maximum number of pages we'll have in this static memory
+ * group.
+ *
+ * Register a new static memory group and return the memory group id.
+ * All memory in the group belongs to a single unit, such as a DIMM. All
+ * memory belonging to a static memory group is added in one go and removed
+ * in one go -- it's static.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
+ * returns the new memory group id.
+ */
+int memory_group_register_static(int nid, unsigned long max_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .s = {
+ .max_pages = max_pages,
+ },
+ };
+
+ if (!max_pages)
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_static);
+
+/**
+ * memory_group_register_dynamic() - Register a dynamic memory group.
+ * @nid: The node id.
+ * @unit_pages: Unit in pages in which memory is added/removed in this
+ * dynamic memory group.
+ *
+ * Register a new dynamic memory group and return the memory group id.
+ * Memory within a dynamic memory group is added/removed dynamically
+ * in unit_pages.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if unit_pages is invalid (0, not a
+ * power of two, smaller than a single memory block). Otherwise, returns the
+ * new memory group id.
+ */
+int memory_group_register_dynamic(int nid, unsigned long unit_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .is_dynamic = true,
+ .d = {
+ .unit_pages = unit_pages,
+ },
+ };
+
+ if (!unit_pages || !is_power_of_2(unit_pages) ||
+ unit_pages < PHYS_PFN(memory_block_size_bytes()))
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
+
+/**
+ * memory_group_unregister() - Unregister a memory group.
+ * @mgid: the memory group id
+ *
+ * Unregister a memory group. If any memory block still belongs to this
+ * memory group, unregistering will fail.
+ *
+ * Returns -EINVAL if the memory group id is invalid, -EBUSY if some memory
+ * blocks still belong to this memory group, and 0 if unregistering
+ * succeeded.
+ */
+int memory_group_unregister(int mgid)
+{
+ struct memory_group *group;
+
+ if (mgid < 0)
+ return -EINVAL;
+
+ group = xa_load(&memory_groups, mgid);
+ if (!group)
+ return -EINVAL;
+ if (!list_empty(&group->memory_blocks))
+ return -EBUSY;
+ xa_erase(&memory_groups, mgid);
+ kfree(group);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_group_unregister);
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * lookup a memory group. We don't care about locking, as we don't expect a
+ * memory group to get unregistered while adding memory to it -- because
+ * the group and the memory are managed by the same driver.
+ */
+struct memory_group *memory_group_find_by_id(int mgid)
+{
+ return xa_load(&memory_groups, mgid);
+}
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * walk all dynamic memory groups excluding a given memory group, either
+ * belonging to a specific node, or belonging to any node.
+ */
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg)
+{
+ struct memory_group *group;
+ unsigned long index;
+ int ret = 0;
+
+ xa_for_each_marked(&memory_groups, index, group,
+ MEMORY_GROUP_MARK_DYNAMIC) {
+ if (group == excluded)
+ continue;
+#ifdef CONFIG_NUMA
+ if (nid != NUMA_NO_NODE && group->nid != nid)
+ continue;
+#endif /* CONFIG_NUMA */
+ ret = func(group, arg);
+ if (ret)
+ break;
+ }
+ return ret;
+}
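For the dynamic flavor, a driver such as virtio-mem registers one group up
front and then adds or removes memory in unit_pages-sized pieces. A hedged
sketch of registration plus the walk helper; the callback and the function
names are illustrative, not from the patch:

	/* Illustrative walk callback; a nonzero return stops the walk. */
	static int dyn_group_count(struct memory_group *group, void *arg)
	{
		(*(int *)arg) += 1;
		return 0;
	}

	static int example_register(int nid, unsigned long unit_pages)
	{
		int count = 0;
		/* unit_pages: power of two, at least one memory block. */
		int mgid = memory_group_register_dynamic(nid, unit_pages);

		if (mgid < 0)
			return mgid;
		walk_dynamic_memory_groups(nid, dyn_group_count, NULL, &count);
		return count;	/* includes the group just registered */
	}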
diff --git a/drivers/base/node.c b/drivers/base/node.c
index be16bbff11cc..c56d34f8158f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -785,8 +785,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
- if (!pfn_valid_within(pfn))
- return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
if (system_state < SYSTEM_RUNNING)
return early_pfn_to_nid(pfn);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 0251f3e6e61d..4110c19c08dc 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -519,6 +519,23 @@ void pm_clk_destroy(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
+static void pm_clk_destroy_action(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+int devm_pm_clk_create(struct device *dev)
+{
+ int ret;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_clk_create);
+
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
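devm_pm_clk_create() ties pm_clk_destroy() to driver detach through a devres
action, so probe error paths need no manual cleanup. A probe-side sketch; the
driver shape and the "bus" clock id are hypothetical:

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = devm_pm_clk_create(&pdev->dev);	/* undone automatically */
		if (ret)
			return ret;
		/* Clocks added here go away together with the pm_clk state. */
		return pm_clk_add(&pdev->dev, "bus");
	}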
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d568772152c2..cbea78e79f3d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1642,7 +1642,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
dev->power.may_skip_resume = true;
- dev->power.must_resume = false;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8a66eaf731e4..ec94049442b9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1447,6 +1447,23 @@ void pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_disable_action(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+ pm_runtime_enable(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
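Likewise, devm_pm_runtime_enable() pairs pm_runtime_enable() with an
automatic pm_runtime_disable() on detach. Sketch; the driver and
foo_hw_init() are hypothetical:

	static int foo_probe(struct platform_device *pdev)
	{
		int ret = devm_pm_runtime_enable(&pdev->dev);

		if (ret)
			return ret;
		/* Failures below no longer need an explicit pm_runtime_disable(). */
		return foo_hw_init(&pdev->dev);
	}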
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 3bad3266a2ad..b91a3a9bf9f6 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -12,14 +12,11 @@
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
- * @irq: Device wake-up capable interrupt
* @wirq: Wake irq specific data
*
- * Internal function to attach either a device IO interrupt or a
- * dedicated wake-up interrupt as a wake IRQ.
+ * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
*/
-static int dev_pm_attach_wake_irq(struct device *dev, int irq,
- struct wake_irq *wirq)
+static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
unsigned long flags;
@@ -65,7 +62,7 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
kfree(wirq);
@@ -196,7 +193,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_name;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
goto err_free_irq;
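With the irq parameter gone, dev_pm_attach_wake_irq() serves only the two
public entry points, whose signatures are unchanged. A consumer sketch; the
IRQ lookup is illustrative:

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;
		/* Register a dedicated wake-up interrupt for the device. */
		return dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
	}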
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index fbb3a558139f..ab3e37aa1830 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -74,7 +74,6 @@ config N64CART
config CDROM
tristate
- select BLK_SCSI_REQUEST
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
@@ -306,7 +305,7 @@ config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
select CDROM
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1c298a8cfb..7bf4686af774 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2111,18 +2111,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
return 0;
}
-static int unregister_transfer_cb(int id, void *ptr, void *data)
-{
- struct loop_device *lo = ptr;
- struct loop_func_table *xfer = data;
-
- mutex_lock(&lo->lo_mutex);
- if (lo->lo_encryption == xfer)
- loop_release_xfer(lo);
- mutex_unlock(&lo->lo_mutex);
- return 0;
-}
-
int loop_unregister_transfer(int number)
{
unsigned int n = number;
@@ -2130,9 +2118,20 @@ int loop_unregister_transfer(int number)
if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
return -EINVAL;
+ /*
+ * This function is called only from cleanup_cryptoloop().
+ * Given that each loop device that has a transfer enabled holds a
+ * reference to the module implementing it, we should never get here
+ * with a transfer that is set (unless forced module unloading is
+ * requested). Thus, check the module's refcount and warn if this is
+ * not a clean unloading.
+ */
+#ifdef CONFIG_MODULE_UNLOAD
+ if (xfer->owner && module_refcount(xfer->owner) != -1)
+ pr_err("Danger! Unregistering an in use transfer function.\n");
+#endif
xfer_funcs[n] = NULL;
- idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
return 0;
}
@@ -2323,8 +2322,9 @@ static int loop_add(int i)
} else {
err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
+ mutex_unlock(&loop_ctl_mutex);
if (err < 0)
- goto out_unlock;
+ goto out_free_dev;
i = err;
err = -ENOMEM;
@@ -2393,15 +2393,19 @@ static int loop_add(int i)
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
+ /* Make this loop device reachable from pathname. */
add_disk(disk);
+ /* Show this loop device. */
+ mutex_lock(&loop_ctl_mutex);
+ lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
return i;
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
+ mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, i);
-out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_free_dev:
kfree(lo);
@@ -2411,9 +2415,14 @@ out:
static void loop_remove(struct loop_device *lo)
{
+ /* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
blk_cleanup_disk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
+ mutex_lock(&loop_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ mutex_unlock(&loop_ctl_mutex);
+ /* There is no longer any way to find this loop device. */
mutex_destroy(&lo->lo_mutex);
kfree(lo);
}
@@ -2437,31 +2446,40 @@ static int loop_control_remove(int idx)
return -EINVAL;
}
+ /* Hide this loop device for serialization. */
ret = mutex_lock_killable(&loop_ctl_mutex);
if (ret)
return ret;
-
lo = idr_find(&loop_index_idr, idx);
- if (!lo) {
+ if (!lo || !lo->idr_visible)
ret = -ENODEV;
- goto out_unlock_ctrl;
- }
+ else
+ lo->idr_visible = false;
+ mutex_unlock(&loop_ctl_mutex);
+ if (ret)
+ return ret;
+ /* Check whether this loop device can be removed. */
ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
- goto out_unlock_ctrl;
+ goto mark_visible;
if (lo->lo_state != Lo_unbound ||
atomic_read(&lo->lo_refcnt) > 0) {
mutex_unlock(&lo->lo_mutex);
ret = -EBUSY;
- goto out_unlock_ctrl;
+ goto mark_visible;
}
+ /* Mark this loop device no longer open()-able. */
lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
- idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
-out_unlock_ctrl:
+ return 0;
+
+mark_visible:
+ /* Show this loop device again. */
+ mutex_lock(&loop_ctl_mutex);
+ lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
return ret;
}
@@ -2475,7 +2493,8 @@ static int loop_control_get_free(int idx)
if (ret)
return ret;
idr_for_each_entry(&loop_index_idr, lo, id) {
- if (lo->lo_state == Lo_unbound)
+ /* Hitting a race here results in creating a new loop device, which is harmless. */
+ if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
goto found;
}
mutex_unlock(&loop_ctl_mutex);
@@ -2591,10 +2610,14 @@ static void __exit loop_exit(void)
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
- mutex_lock(&loop_ctl_mutex);
+ /*
+ * There is no need to use loop_ctl_mutex here, for nobody else can
+ * access loop_index_idr when this module is unloading (unless forced
+ * module unloading is requested). If this is not a clean unloading,
+ * we have no means to avoid kernel crash.
+ */
idr_for_each_entry(&loop_index_idr, lo, id)
loop_remove(lo);
- mutex_unlock(&loop_ctl_mutex);
idr_destroy(&loop_index_idr);
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 1988899db63a..04c88dd6eabd 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -68,6 +68,7 @@ struct loop_device {
struct blk_mq_tag_set tag_set;
struct gendisk *lo_disk;
struct mutex lo_mutex;
+ bool idr_visible;
};
struct loop_cmd {
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index c84be0028f63..26798da661bd 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -129,8 +129,8 @@ static int __init n64cart_probe(struct platform_device *pdev)
}
reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!reg_base)
- return -EINVAL;
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 7c6ae1036927..a295634597ba 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -27,7 +27,6 @@ config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
select CDROM
- select BLK_SCSI_REQUEST # only for the generic cdrom code
help
This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 57c6ae7debd9..9b3bd083b411 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -762,7 +762,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vblk;
/* Default queue sizing is to fill the ring. */
- if (likely(!virtblk_queue_depth)) {
+ if (!virtblk_queue_depth) {
queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
@@ -836,7 +836,7 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
- if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+ if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
dev_err(&vdev->dev,
"block size is changed unexpectedly, now is %u\n",
blk_size);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 125b22205d38..33eba3df4dd9 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) "xen-blkback: " fmt
-#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 715bfa8aca7f..72902104f111 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -80,6 +80,7 @@ enum blkif_state {
BLKIF_STATE_DISCONNECTED,
BLKIF_STATE_CONNECTED,
BLKIF_STATE_SUSPENDED,
+ BLKIF_STATE_ERROR,
};
struct grant {
@@ -89,6 +90,7 @@ struct grant {
};
enum blk_req_status {
+ REQ_PROCESSING,
REQ_WAITING,
REQ_DONE,
REQ_ERROR,
@@ -530,10 +532,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
id = get_id_from_freelist(rinfo);
rinfo->shadow[id].request = req;
- rinfo->shadow[id].status = REQ_WAITING;
+ rinfo->shadow[id].status = REQ_PROCESSING;
rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
- (*ring_req)->u.rw.id = id;
+ rinfo->shadow[id].req.u.rw.id = id;
return id;
}
@@ -541,11 +543,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
- struct blkif_request *ring_req;
+ struct blkif_request *ring_req, *final_ring_req;
unsigned long id;
/* Fill out a communications ring structure. */
- id = blkif_ring_get_request(rinfo, req, &ring_req);
+ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+ ring_req = &rinfo->shadow[id].req;
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -556,8 +559,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
else
ring_req->u.discard.flag = 0;
- /* Keep a private copy so we can reissue requests when recovering. */
- rinfo->shadow[id].req = *ring_req;
+ /* Copy the request to the ring page. */
+ *final_ring_req = *ring_req;
+ rinfo->shadow[id].status = REQ_WAITING;
return 0;
}
@@ -690,6 +694,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
{
struct blkfront_info *info = rinfo->dev_info;
struct blkif_request *ring_req, *extra_ring_req = NULL;
+ struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
unsigned long id, extra_id = NO_ASSOCIATED_ID;
bool require_extra_req = false;
int i;
@@ -734,7 +739,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
}
/* Fill out a communications ring structure. */
- id = blkif_ring_get_request(rinfo, req, &ring_req);
+ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+ ring_req = &rinfo->shadow[id].req;
num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
num_grant = 0;
@@ -785,7 +791,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
extra_id = blkif_ring_get_request(rinfo, req,
- &extra_ring_req);
+ &final_extra_ring_req);
+ extra_ring_req = &rinfo->shadow[extra_id].req;
+
/*
* Only the first request contains the scatter-gather
* list.
@@ -827,10 +835,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (setup.segments)
kunmap_atomic(setup.segments);
- /* Keep a private copy so we can reissue requests when recovering. */
- rinfo->shadow[id].req = *ring_req;
- if (unlikely(require_extra_req))
- rinfo->shadow[extra_id].req = *extra_ring_req;
+ /* Copy request(s) to the ring page. */
+ *final_ring_req = *ring_req;
+ rinfo->shadow[id].status = REQ_WAITING;
+ if (unlikely(require_extra_req)) {
+ *final_extra_ring_req = *extra_ring_req;
+ rinfo->shadow[extra_id].status = REQ_WAITING;
+ }
if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
@@ -1353,8 +1364,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
static int blkif_get_final_status(enum blk_req_status s1,
enum blk_req_status s2)
{
- BUG_ON(s1 == REQ_WAITING);
- BUG_ON(s2 == REQ_WAITING);
+ BUG_ON(s1 < REQ_DONE);
+ BUG_ON(s2 < REQ_DONE);
if (s1 == REQ_ERROR || s2 == REQ_ERROR)
return BLKIF_RSP_ERROR;
@@ -1387,7 +1398,7 @@ static bool blkif_completion(unsigned long *id,
s->status = blkif_rsp_to_req_status(bret->status);
/* Wait the second response if not yet here. */
- if (s2->status == REQ_WAITING)
+ if (s2->status < REQ_DONE)
return false;
bret->status = blkif_get_final_status(s->status,
@@ -1495,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
struct request *req;
- struct blkif_response *bret;
+ struct blkif_response bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
@@ -1506,54 +1517,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
- rp = rinfo->ring.sring->rsp_prod;
- rmb(); /* Ensure we see queued responses up to 'rp'. */
+ rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
+ virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
+ if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
+ pr_alert("%s: illegal number of responses %u\n",
+ info->gd->disk_name, rp - rinfo->ring.rsp_cons);
+ goto err;
+ }
for (i = rinfo->ring.rsp_cons; i != rp; i++) {
unsigned long id;
+ unsigned int op;
+
+ RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
+ id = bret.id;
- bret = RING_GET_RESPONSE(&rinfo->ring, i);
- id = bret->id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
* look in get_id_from_freelist.
*/
if (id >= BLK_RING_SIZE(info)) {
- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
- info->gd->disk_name, op_name(bret->operation), id);
- /* We can't safely get the 'struct request' as
- * the id is busted. */
- continue;
+ pr_alert("%s: response has incorrect id (%ld)\n",
+ info->gd->disk_name, id);
+ goto err;
}
+ if (rinfo->shadow[id].status != REQ_WAITING) {
+ pr_alert("%s: response references no pending request\n",
+ info->gd->disk_name);
+ goto err;
+ }
+
+ rinfo->shadow[id].status = REQ_PROCESSING;
req = rinfo->shadow[id].request;
- if (bret->operation != BLKIF_OP_DISCARD) {
+ op = rinfo->shadow[id].req.operation;
+ if (op == BLKIF_OP_INDIRECT)
+ op = rinfo->shadow[id].req.u.indirect.indirect_op;
+ if (bret.operation != op) {
+ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
+ info->gd->disk_name, bret.operation, op);
+ goto err;
+ }
+
+ if (bret.operation != BLKIF_OP_DISCARD) {
/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
- if (!blkif_completion(&id, rinfo, bret))
+ if (!blkif_completion(&id, rinfo, &bret))
continue;
}
if (add_id_to_freelist(rinfo, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
- info->gd->disk_name, op_name(bret->operation), id);
+ info->gd->disk_name, op_name(bret.operation), id);
continue;
}
- if (bret->status == BLKIF_RSP_OKAY)
+ if (bret.status == BLKIF_RSP_OKAY)
blkif_req(req)->error = BLK_STS_OK;
else
blkif_req(req)->error = BLK_STS_IOERR;
- switch (bret->operation) {
+ switch (bret.operation) {
case BLKIF_OP_DISCARD:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -1563,15 +1596,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
}
- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(blkif_req(req)->error)) {
@@ -1584,9 +1617,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
fallthrough;
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
- "request: %x\n", bret->status);
+ if (unlikely(bret.status != BLKIF_RSP_OKAY))
+ dev_dbg_ratelimited(&info->xbdev->dev,
+ "Bad return from blkdev data request: %#x\n",
+ bret.status);
break;
default:
@@ -1612,6 +1646,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return IRQ_HANDLED;
+
+ err:
+ info->connected = BLKIF_STATE_ERROR;
+
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+
+ pr_alert("%s disabled for further use\n", info->gd->disk_name);
+ return IRQ_HANDLED;
}
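
The blkfront rework above is one instance of a general rule for untrusted shared memory: snapshot the peer's data into private storage (RING_COPY_RESPONSE instead of RING_GET_RESPONSE), then validate only the private copy, so the backend cannot rewrite fields between the check and the use. A minimal user-space sketch of that copy-then-validate pattern; the struct layout and RING_SIZE bound are illustrative, not the Xen ring ABI:

#include <stdint.h>
#include <stddef.h>

#define RING_SIZE 32	/* illustrative ring bound */

struct resp {		/* illustrative layout, not the Xen ABI */
	uint64_t id;
	int16_t status;
};

/* Copy the slot out of shared memory, then validate only the copy. */
static int consume_resp(volatile struct resp *slot, struct resp *out)
{
	struct resp local;

	local.id = slot->id;		/* snapshot: the peer may rewrite the slot */
	local.status = slot->status;

	if (local.id >= RING_SIZE)	/* bogus id: refuse, never index with it */
		return -1;

	*out = local;
	return 0;
}

The same reasoning drives the new shadow-state checks: a response may only complete a request that is actually REQ_WAITING, and any inconsistency flips the device into BLKIF_STATE_ERROR rather than letting a misbehaving backend steer the frontend.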
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index feb827eefd1a..bd2e5b1560f5 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -629,7 +629,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (CDROM_CAN(CDC_MRW_W))
cdi->exit = cdrom_mrw_exit;
- if (cdi->disk)
+ if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
cdi->cdda_method = CDDA_OLD;
@@ -2159,81 +2159,26 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
- struct request_queue *q = cdi->disk->queue;
- struct request *rq;
- struct scsi_request *req;
- struct bio *bio;
- unsigned int len;
+ int max_frames = (queue_max_sectors(cdi->disk->queue) << 9) /
+ CD_FRAMESIZE_RAW;
int nr, ret = 0;
- if (!q)
- return -ENXIO;
-
- if (!blk_queue_scsi_passthrough(q)) {
- WARN_ONCE(true,
- "Attempt read CDDA info through a non-SCSI queue\n");
- return -EINVAL;
- }
-
cdi->last_sense = 0;
while (nframes) {
- nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
- nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
-
- len = nr * CD_FRAMESIZE_RAW;
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- break;
- }
- req = scsi_req(rq);
-
- ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
- if (ret) {
- blk_put_request(rq);
- break;
- }
-
- req->cmd[0] = GPCMD_READ_CD;
- req->cmd[1] = 1 << 2;
- req->cmd[2] = (lba >> 24) & 0xff;
- req->cmd[3] = (lba >> 16) & 0xff;
- req->cmd[4] = (lba >> 8) & 0xff;
- req->cmd[5] = lba & 0xff;
- req->cmd[6] = (nr >> 16) & 0xff;
- req->cmd[7] = (nr >> 8) & 0xff;
- req->cmd[8] = nr & 0xff;
- req->cmd[9] = 0xf8;
-
- req->cmd_len = 12;
- rq->timeout = 60 * HZ;
- bio = rq->bio;
-
- blk_execute_rq(cdi->disk, rq, 0);
- if (scsi_req(rq)->result) {
- struct scsi_sense_hdr sshdr;
-
- ret = -EIO;
- scsi_normalize_sense(req->sense, req->sense_len,
- &sshdr);
- cdi->last_sense = sshdr.sense_key;
- }
-
- if (blk_rq_unmap_user(bio))
- ret = -EFAULT;
- blk_put_request(rq);
+ else
+ nr = min(nframes, max_frames);
+ ret = cdi->ops->read_cdda_bpc(cdi, ubuf, lba, nr,
+ &cdi->last_sense);
if (ret)
break;
nframes -= nr;
lba += nr;
- ubuf += len;
+ ubuf += (nr * CD_FRAMESIZE_RAW);
}
return ret;
@@ -3357,13 +3302,6 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
void __user *argp = (void __user *)arg;
int ret;
- /*
- * Try the generic SCSI command ioctl's first.
- */
- ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
- if (ret != -ENOTTY)
- return ret;
-
switch (cmd) {
case CDROMMULTISESSION:
return cdrom_ioctl_multisession(cdi, argp);
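
The rewritten read_cdda loop above is plain chunking: cap each transfer at what the request queue allows, then advance the LBA and the user buffer by the frames actually issued. A stand-alone sketch of the same arithmetic, with CD_FRAMESIZE_RAW = 2352 as in the kernel and max_sectors standing in for queue_max_sectors():

#include <stdio.h>

#define CD_FRAMESIZE_RAW 2352

/* One raw frame is 2352 bytes; a 512-byte-sector queue limit caps
 * how many frames fit in a single request. */
static void read_cdda(unsigned int max_sectors, int lba, int nframes)
{
	int max_frames = (max_sectors << 9) / CD_FRAMESIZE_RAW;

	while (nframes) {
		int nr = nframes < max_frames ? nframes : max_frames;

		printf("request: lba=%d frames=%d bytes=%d\n",
		       lba, nr, nr * CD_FRAMESIZE_RAW);
		nframes -= nr;
		lba += nr;
	}
}

int main(void)
{
	read_cdda(128, 1000, 75);	/* 128 sectors -> 27 frames per request */
	return 0;
}

The SCSI CDB construction and sense handling move behind the new ops->read_cdda_bpc() hook, which is also why register_cdrom() now keys CDDA_BPC_FULL off that hook instead of the presence of a gendisk.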
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index 0a4c69539f24..a7ead2a4c753 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -73,7 +73,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data);
void ipmi_irq_start_cleanup(struct si_sm_io *io);
int ipmi_std_irq_setup(struct si_sm_io *io);
void ipmi_irq_finish_setup(struct si_sm_io *io);
-int ipmi_si_remove_by_dev(struct device *dev);
+void ipmi_si_remove_by_dev(struct device *dev);
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
unsigned long addr);
void ipmi_hardcode_init(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 62929a3e397e..6f3272b58ced 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -591,7 +591,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
@@ -683,10 +683,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->io.dev,
- "Couldn't get irq info: %x.\n", msg[2]);
- dev_warn(smi_info->io.dev,
- "Maybe ok, but ipmi might run very slowly.\n");
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Couldn't get irq info: %x,\n"
+ "Maybe ok, but ipmi might run very slowly.\n",
+ msg[2]);
smi_info->si_state = SI_NORMAL;
break;
}
@@ -721,7 +721,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
@@ -1343,7 +1343,7 @@ retry:
if (cc != IPMI_CC_NO_ERROR &&
++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"BMC returned 0x%2.2x, retry get bmc device id\n",
cc);
goto retry;
@@ -1605,7 +1605,7 @@ static ssize_t name##_show(struct device *dev, \
\
return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
-static DEVICE_ATTR(name, 0444, name##_show, NULL)
+static DEVICE_ATTR_RO(name)
static ssize_t type_show(struct device *dev,
struct device_attribute *attr,
@@ -1615,7 +1615,7 @@ static ssize_t type_show(struct device *dev,
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR_RO(type);
static ssize_t interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
@@ -1626,8 +1626,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
return snprintf(buf, 10, "%d\n", enabled);
}
-static DEVICE_ATTR(interrupts_enabled, 0444,
- interrupts_enabled_show, NULL);
+static DEVICE_ATTR_RO(interrupts_enabled);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
@@ -1658,7 +1657,7 @@ static ssize_t params_show(struct device *dev,
smi_info->io.irq,
smi_info->io.slave_addr);
}
-static DEVICE_ATTR(params, 0444, params_show, NULL);
+static DEVICE_ATTR_RO(params);
static struct attribute *ipmi_si_dev_attrs[] = {
&dev_attr_type.attr,
@@ -2228,22 +2227,18 @@ static void cleanup_one_si(struct smi_info *smi_info)
kfree(smi_info);
}
-int ipmi_si_remove_by_dev(struct device *dev)
+void ipmi_si_remove_by_dev(struct device *dev)
{
struct smi_info *e;
- int rv = -ENOENT;
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
if (e->io.dev == dev) {
cleanup_one_si(e);
- rv = 0;
break;
}
}
mutex_unlock(&smi_infos_lock);
-
- return rv;
}
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 11c9160275df..2be2967f6b5f 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -29,9 +29,9 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
return ipmi_si_add_smi(&io);
}
-static int __exit ipmi_parisc_remove(struct parisc_device *dev)
+static void __exit ipmi_parisc_remove(struct parisc_device *dev)
{
- return ipmi_si_remove_by_dev(&dev->dev);
+ ipmi_si_remove_by_dev(&dev->dev);
}
static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 380a6a542890..505cc978c97a 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -411,7 +411,9 @@ static int ipmi_probe(struct platform_device *pdev)
static int ipmi_remove(struct platform_device *pdev)
{
- return ipmi_si_remove_by_dev(&pdev->dev);
+ ipmi_si_remove_by_dev(&pdev->dev);
+
+ return 0;
}
static int pdev_match_name(struct device *dev, const void *data)
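
The int-to-void conversion running through the ipmi files above is the usual cleanup once no caller can act on the error: ipmi_remove() has to return 0 either way, so the lookup simply tears down a matching entry or quietly does nothing. A condensed sketch of the locked list-walk idiom behind ipmi_si_remove_by_dev(); the types are simplified stand-ins for the ipmi structures:

#include <pthread.h>
#include <stddef.h>

struct smi { struct smi *next; const void *dev; };

static struct smi *smi_list;
static pthread_mutex_t smi_lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove the first entry bound to @dev; finding nothing is not an error. */
static void remove_by_dev(const void *dev)
{
	struct smi **pp;

	pthread_mutex_lock(&smi_lock);
	for (pp = &smi_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->dev == dev) {
			*pp = (*pp)->next;	/* cleanup_one_si() stand-in */
			break;
		}
	}
	pthread_mutex_unlock(&smi_lock);
}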
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e873f9ea2e65..c5b3dc97396a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -403,6 +403,7 @@ source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/mvebu/Kconfig"
+source "drivers/clk/pistachio/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/ralink/Kconfig"
source "drivers/clk/renesas/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 2b91d34c582b..e42312121e51 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -97,7 +97,7 @@ obj-y += mstar/
obj-y += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
obj-$(CONFIG_COMMON_CLK_NXP) += nxp/
-obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
+obj-$(CONFIG_COMMON_CLK_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-y += ralink/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b4fc8d71daf2..b656d25a9767 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -128,6 +128,12 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
int i;
u32 div;
+ /* do not look for a rate that is outside of our range */
+ if (gck->range.max && req->rate > gck->range.max)
+ req->rate = gck->range.max;
+ if (gck->range.min && req->rate < gck->range.min)
+ req->rate = gck->range.min;
+
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (gck->chg_pid == i)
continue;
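
The clamp added to clk_generated_determine_rate() keeps the request inside the hardware's published range before any parent walk, with a zero bound meaning "no limit". The same guard in isolation, as a small sketch:

/* Clamp a requested rate to a [min, max] range; 0 means unbounded. */
static unsigned long clamp_requested_rate(unsigned long rate,
					  unsigned long min,
					  unsigned long max)
{
	if (max && rate > max)
		rate = max;
	if (min && rate < min)
		rate = min;
	return rate;
}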
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 9e1ec48c4474..cf8c079aa086 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(pmc_pll_lock);
static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
-/**
+/*
* PLL clocks identifiers
* @PLL_ID_CPU: CPU PLL identifier
* @PLL_ID_SYS: System PLL identifier
@@ -56,7 +56,7 @@ enum pll_ids {
PLL_ID_MAX,
};
-/**
+/*
* PLL type identifiers
* @PLL_TYPE_FRAC: fractional PLL identifier
* @PLL_TYPE_DIV: divider PLL identifier
@@ -118,7 +118,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.output = pll_outputs,
};
-/**
+/*
* PLL clocks description
* @n: clock name
* @p: clock parent
@@ -285,7 +285,7 @@ static const struct {
},
};
-/**
+/*
* Master clock (MCK[1..4]) description
* @n: clock name
* @ep: extra parents names array
@@ -337,7 +337,7 @@ static const struct {
.c = 1, },
};
-/**
+/*
* System clock description
* @n: clock name
* @p: clock parent name
@@ -361,7 +361,7 @@ static const struct {
/* Mux table for programmable clocks. */
static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 5, 6, 7, 8, 9, 10, };
-/**
+/*
* Peripheral clock description
* @n: clock name
* @p: clock parent name
@@ -449,7 +449,7 @@ static const struct {
{ .n = "uhphs_clk", .p = "mck1", .id = 106, },
};
-/**
+/*
* Generic clock description
* @n: clock name
* @pp: PLL parents
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 1ac803e14fa3..a254512965eb 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -805,11 +805,10 @@ static int bcm2835_pll_divider_is_on(struct clk_hw *hw)
return !(cprman_read(cprman, data->a2w_reg) & A2W_PLL_CHANNEL_DISABLE);
}
-static long bcm2835_pll_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int bcm2835_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, parent_rate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static unsigned long bcm2835_pll_divider_get_rate(struct clk_hw *hw,
@@ -901,7 +900,7 @@ static const struct clk_ops bcm2835_pll_divider_clk_ops = {
.unprepare = bcm2835_pll_divider_off,
.recalc_rate = bcm2835_pll_divider_get_rate,
.set_rate = bcm2835_pll_divider_set_rate,
- .round_rate = bcm2835_pll_divider_round_rate,
+ .determine_rate = bcm2835_pll_divider_determine_rate,
.debug_init = bcm2835_pll_divider_debug_init,
};
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 87ba4966b0e8..f6b2bf558486 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -446,6 +446,27 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
divider->width, divider->flags);
}
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = clk_div_readl(divider) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_determine_rate(hw, req, divider->table,
+ divider->width,
+ divider->flags, val);
+ }
+
+ return divider_determine_rate(hw, req, divider->table, divider->width,
+ divider->flags);
+}
+
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags)
@@ -501,6 +522,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
@@ -508,6 +530,7 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
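
The conversions that follow (bcm2835, stm32f4/h7/mp1, imx divider-gate) are the same mechanical move: .round_rate() packs the rounded rate or a negative error into one long, while .determine_rate() reports status in an int and carries its results inside a struct clk_rate_request, so wrappers that forwarded to clk_divider_ops.round_rate now forward to .determine_rate instead. A reduced sketch of the two shapes; the request struct is trimmed to the fields the conversion touches:

struct clk_rate_request {	/* trimmed: the real struct carries more */
	unsigned long rate;
	unsigned long best_parent_rate;
};

/* Old shape: result and error share one long return value. */
static long old_round_rate(unsigned long rate, unsigned long *prate)
{
	if (!*prate)
		return -22;		/* -EINVAL stand-in */
	return rate - (rate % 2);	/* illustrative even-step rounding */
}

/* New shape: status in the return value, results in the request. */
static int new_determine_rate(struct clk_rate_request *req)
{
	long rounded = old_round_rate(req->rate, &req->best_parent_rate);

	if (rounded < 0)
		return (int)rounded;
	req->rate = (unsigned long)rounded;
	return 0;
}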
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index b1e556f20911..4274540327ce 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -3,8 +3,39 @@
* Copyright (C) 2014 Intel Corporation
*
* Adjustable fractional divider clock implementation.
- * Output rate = (m / n) * parent_rate.
* Uses rational best approximation algorithm.
+ *
+ * Output is calculated as
+ *
+ * rate = (m / n) * parent_rate (1)
+ *
+ * This is useful when we have a prescaler block which asks for
+ * m (numerator) and n (denominator) values to be provided to satisfy
+ * the (1) as much as possible.
+ *
+ * Since m and n have the limitation by a range, e.g.
+ *
+ * n >= 1, n < N_width, where N_width = 2^nwidth (2)
+ *
+ * for some cases the output may be saturated. Hence, from (1) and (2),
+ * assuming the worst case when m = 1, the inequality
+ *
+ * floor(log2(parent_rate / rate)) <= nwidth (3)
+ *
+ * may be derived. Thus, in cases when
+ *
+ * (parent_rate / rate) >> N_width (4)
+ *
+ * we might scale up the rate by 2^scale (see the description of
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS for additional information), where
+ *
+ * scale = floor(log2(parent_rate / rate)) - nwidth (5)
+ *
+ * and assume that the IP, that needs m and n, has also its own
+ * prescaler, which is capable of dividing by 2^scale. In this way
+ * we get the denominator to satisfy the desired range (2) and
+ * at the same time a much better result of m and n than simple
+ * saturated values.
*/
#include <linux/clk-provider.h>
@@ -14,6 +45,8 @@
#include <linux/slab.h>
#include <linux/rational.h>
+#include "clk-fractional-divider.h"
+
static inline u32 clk_fd_readl(struct clk_fractional_divider *fd)
{
if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
@@ -68,21 +101,26 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate,
- unsigned long *m, unsigned long *n)
+void clk_fractional_divider_general_approximation(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
- unsigned long scale;
/*
* Get rate closer to *parent_rate to guarantee there is no overflow
* for m and n. In the result it will be the nearest rate left shifted
* by (scale - fd->nwidth) bits.
+ *
+ * For the detailed explanation see the top comment in this file.
*/
- scale = fls_long(*parent_rate / rate - 1);
- if (scale > fd->nwidth)
- rate <<= scale - fd->nwidth;
+ if (fd->flags & CLK_FRAC_DIVIDER_POWER_OF_TWO_PS) {
+ unsigned long scale = fls_long(*parent_rate / rate - 1);
+
+ if (scale > fd->nwidth)
+ rate <<= scale - fd->nwidth;
+ }
rational_best_approximation(rate, *parent_rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
@@ -102,7 +140,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
if (fd->approximation)
fd->approximation(hw, rate, parent_rate, &m, &n);
else
- clk_fd_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
ret = (u64)*parent_rate * m;
do_div(ret, n);
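
The new top-of-file comment is easier to check with numbers. Take parent_rate = 1 GHz, rate = 1 kHz and nwidth = 8: the ratio is 10^6, fls_long(10^6 - 1) = 20, so the request is approximated at rate << (20 - 8) = 4.096 MHz and the IP's own power-of-two prescaler supplies the remaining /2^12. A hedged sketch of that computation, with a portable stand-in for the kernel's fls_long():

#include <stdio.h>

/* Stand-in for fls_long(): 1-based index of the highest set bit. */
static unsigned int fls_long_ish(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long parent_rate = 1000000000UL;	/* 1 GHz */
	unsigned long rate = 1000UL;			/* 1 kHz */
	unsigned int nwidth = 8;			/* assumed denominator width */
	unsigned int scale = fls_long_ish(parent_rate / rate - 1);

	if (scale > nwidth)
		rate <<= scale - nwidth;	/* 1 kHz << 12 = 4.096 MHz */
	printf("scale=%u, approximated rate=%lu\n", scale, rate);
	return 0;
}

With the CLK_FRAC_DIVIDER_POWER_OF_TWO_PS flag gating that shift, drivers without such a prescaler keep the plain saturating behaviour.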
diff --git a/drivers/clk/clk-fractional-divider.h b/drivers/clk/clk-fractional-divider.h
new file mode 100644
index 000000000000..f0f71d23797b
--- /dev/null
+++ b/drivers/clk/clk-fractional-divider.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_FRACTIONAL_DIV_H
+#define _CLK_FRACTIONAL_DIV_H
+
+struct clk_hw;
+
+extern const struct clk_ops clk_fractional_divider_ops;
+
+void clk_fractional_divider_general_approximation(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate,
+ unsigned long *m,
+ unsigned long *n);
+
+#endif
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c7a3a029fb1e..8f02c0b88000 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -269,23 +269,14 @@ static bool lmk04832_regmap_rd_regs(struct device *dev, unsigned int reg)
{
switch (reg) {
case LMK04832_REG_RST3W ... LMK04832_REG_ID_MASKREV:
- fallthrough;
case LMK04832_REG_ID_VNDR_MSB:
- fallthrough;
case LMK04832_REG_ID_VNDR_LSB:
- fallthrough;
case LMK04832_REG_CLKOUT_CTRL0(0) ... LMK04832_REG_PLL2_DLD_CNT_LSB:
- fallthrough;
case LMK04832_REG_PLL2_LD:
- fallthrough;
case LMK04832_REG_PLL2_PD:
- fallthrough;
case LMK04832_REG_PLL1R_RST:
- fallthrough;
case LMK04832_REG_CLR_PLL_LOST ... LMK04832_REG_RB_DAC_VAL_LSB:
- fallthrough;
case LMK04832_REG_RB_HOLDOVER:
- fallthrough;
case LMK04832_REG_SPI_LOCK:
return true;
default:
@@ -297,27 +288,18 @@ static bool lmk04832_regmap_wr_regs(struct device *dev, unsigned int reg)
{
switch (reg) {
case LMK04832_REG_RST3W:
- fallthrough;
case LMK04832_REG_POWERDOWN:
return true;
case LMK04832_REG_ID_DEV_TYPE ... LMK04832_REG_ID_MASKREV:
- fallthrough;
case LMK04832_REG_ID_VNDR_MSB:
- fallthrough;
case LMK04832_REG_ID_VNDR_LSB:
return false;
case LMK04832_REG_CLKOUT_CTRL0(0) ... LMK04832_REG_PLL2_DLD_CNT_LSB:
- fallthrough;
case LMK04832_REG_PLL2_LD:
- fallthrough;
case LMK04832_REG_PLL2_PD:
- fallthrough;
case LMK04832_REG_PLL1R_RST:
- fallthrough;
case LMK04832_REG_CLR_PLL_LOST ... LMK04832_REG_RB_DAC_VAL_LSB:
- fallthrough;
case LMK04832_REG_RB_HOLDOVER:
- fallthrough;
case LMK04832_REG_SPI_LOCK:
return true;
default:
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index e41a3a9f7528..b8c3d0da1918 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Clock driver for Palmas device.
*
@@ -6,15 +7,6 @@
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 5c75e3d906c2..af46176ad053 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -709,10 +709,10 @@ static unsigned long stm32f4_pll_div_recalc_rate(struct clk_hw *hw,
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long stm32f4_pll_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, prate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int stm32f4_pll_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -738,7 +738,7 @@ static int stm32f4_pll_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops stm32f4_pll_div_ops = {
.recalc_rate = stm32f4_pll_div_recalc_rate,
- .round_rate = stm32f4_pll_div_round_rate,
+ .determine_rate = stm32f4_pll_div_determine_rate,
.set_rate = stm32f4_pll_div_set_rate,
};
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index 0ea7261d15e0..1a701eada0c1 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -845,10 +845,10 @@ static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int odf_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, prate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -875,7 +875,7 @@ static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops odf_divider_ops = {
.recalc_rate = odf_divider_recalc_rate,
- .round_rate = odf_divider_round_rate,
+ .determine_rate = odf_divider_determine_rate,
.set_rate = odf_divider_set_rate,
};
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 256575bd29b9..4bd1fe7d8af4 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -1076,14 +1076,10 @@ static int clk_divider_rtc_set_rate(struct clk_hw *hw, unsigned long rate,
static int clk_divider_rtc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
- unsigned long best_parent_rate = req->best_parent_rate;
+ if (req->best_parent_hw == clk_hw_get_parent_by_index(hw, HSE_RTC))
+ return clk_divider_ops.determine_rate(hw, req);
- if (req->best_parent_hw == clk_hw_get_parent_by_index(hw, HSE_RTC)) {
- req->rate = clk_divider_ops.round_rate(hw, req->rate, &best_parent_rate);
- req->best_parent_rate = best_parent_rate;
- } else {
- req->rate = best_parent_rate;
- }
+ req->rate = req->best_parent_rate;
return 0;
}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 3c737742c2a9..c6d3b1ab3d55 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -907,6 +907,7 @@ static const struct of_device_id clk_vc5_of_match[];
static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ unsigned int oe, sd, src_mask = 0, src_val = 0;
struct vc5_driver_data *vc5;
struct clk_init_data init;
const char *parent_names[2];
@@ -930,11 +931,33 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EPROBE_DEFER;
vc5->regmap = devm_regmap_init_i2c(client, &vc5_regmap_config);
- if (IS_ERR(vc5->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- return PTR_ERR(vc5->regmap);
+ if (IS_ERR(vc5->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(vc5->regmap),
+ "failed to allocate register map\n");
+
+ ret = of_property_read_u32(client->dev.of_node, "idt,shutdown", &sd);
+ if (!ret) {
+ src_mask |= VC5_PRIM_SRC_SHDN_EN_GBL_SHDN;
+ if (sd)
+ src_val |= VC5_PRIM_SRC_SHDN_EN_GBL_SHDN;
+ } else if (ret != -EINVAL) {
+ return dev_err_probe(&client->dev, ret,
+ "could not read idt,shutdown\n");
}
+ ret = of_property_read_u32(client->dev.of_node,
+ "idt,output-enable-active", &oe);
+ if (!ret) {
+ src_mask |= VC5_PRIM_SRC_SHDN_SP;
+ if (oe)
+ src_val |= VC5_PRIM_SRC_SHDN_SP;
+ } else if (ret != -EINVAL) {
+ return dev_err_probe(&client->dev, ret,
+ "could not read idt,output-enable-active\n");
+ }
+
+ regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask, src_val);
+
/* Register clock input mux */
memset(&init, 0, sizeof(init));
@@ -957,10 +980,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
__clk_get_name(vc5->pin_clkin);
}
- if (!init.num_parents) {
- dev_err(&client->dev, "no input clock specified!\n");
- return -EINVAL;
- }
+ if (!init.num_parents)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "no input clock specified!\n");
/* Configure Optional Loading Capacitance for external XTAL */
if (!(vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)) {
@@ -1099,14 +1121,16 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
ret = of_clk_add_hw_provider(client->dev.of_node, vc5_of_clk_get, vc5);
if (ret) {
- dev_err(&client->dev, "unable to add clk provider\n");
+ dev_err_probe(&client->dev, ret,
+ "unable to add clk provider\n");
goto err_clk;
}
return 0;
err_clk_register:
- dev_err(&client->dev, "unable to register %s\n", init.name);
+ dev_err_probe(&client->dev, ret,
+ "unable to register %s\n", init.name);
kfree(init.name); /* clock framework made a copy of the name */
err_clk:
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
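
dev_err_probe(), used throughout the versaclock5 conversion above, is the stock helper for probe paths: it logs (quietly for -EPROBE_DEFER, loudly otherwise) and hands the error code straight back, so the log-then-return pair collapses into one statement. A user-space approximation of its contract:

#include <stdarg.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel value; only its identity matters here */

/* Log and pass the error through, staying quiet for probe deferral. */
static int dev_err_probe_ish(const char *dev, int err, const char *fmt, ...)
{
	if (err != -EPROBE_DEFER) {
		va_list ap;

		fprintf(stderr, "%s: error %d: ", dev, err);
		va_start(ap, fmt);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
	}
	return err;
}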
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 7c4f31b31eb0..d85ba78abbb1 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/slab.h>
+#include "../clk-fractional-divider.h"
#include "clk.h"
#define PCG_PCS_SHIFT 24
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 2c309e3dc8e3..04e728538cef 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -216,7 +216,8 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div->width = PCG_PREDIV_WIDTH;
divider_ops = &imx8m_clk_composite_divider_ops;
mux_ops = &clk_mux_ops;
- flags |= CLK_SET_PARENT_GATE;
+ if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
+ flags |= CLK_SET_PARENT_GATE;
}
div->lock = &imx_ccm_lock;
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
index 0322a843d245..26b210cba9be 100644
--- a/drivers/clk/imx/clk-divider-gate.c
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -64,10 +64,10 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
div->flags, div->width);
}
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, prate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -154,12 +154,12 @@ static int clk_divider_is_enabled(struct clk_hw *hw)
static const struct clk_ops clk_divider_gate_ro_ops = {
.recalc_rate = clk_divider_gate_recalc_rate_ro,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
};
static const struct clk_ops clk_divider_gate_ops = {
.recalc_rate = clk_divider_gate_recalc_rate,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_gate_set_rate,
.enable = clk_divider_enable,
.disable = clk_divider_disable,
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index f1919fafb124..e92621fa8b9c 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -407,10 +407,10 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
- hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
hws[IMX8MM_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
hws[IMX8MM_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
- hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
hws[IMX8MM_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
hws[IMX8MM_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
@@ -470,10 +470,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/*
* DRAM clocks are manipulated from TF-A outside clock framework.
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
+ * as div value should always be read from hardware
*/
- hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
- hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
+ hws[IMX8MM_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
/* IP */
hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 88f6630cd472..c55577604e16 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -40,6 +40,9 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
+
static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
"sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", };
@@ -402,10 +405,10 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
- hws[IMX8MN_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
hws[IMX8MN_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
hws[IMX8MN_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
- hws[IMX8MN_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
hws[IMX8MN_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
hws[IMX8MN_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
@@ -421,6 +424,8 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_A53_SRC] = hws[IMX8MN_CLK_A53_DIV];
hws[IMX8MN_CLK_A53_CG] = hws[IMX8MN_CLK_A53_DIV];
+ hws[IMX8MN_CLK_M7_CORE] = imx8m_clk_hw_composite_core("arm_m7_core", imx8mn_m7_sels, base + 0x8080);
+
hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180);
hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200);
@@ -453,10 +458,11 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/*
* DRAM clocks are manipulated from TF-A outside clock framework.
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
+ * as div value should always be read from hardware
*/
- hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
- hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
+ hws[IMX8MN_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index c491bc9c61ce..83cc2b1c3294 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -449,11 +449,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/*
* DRAM clocks are manipulated from TF-A outside clock framework.
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
+ * as div value should always be read from hardware
*/
hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
- hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
- hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
+ hws[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
/* IP */
hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 7571603bee23..e144f983fd8c 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -530,8 +530,9 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
-#define IMX_COMPOSITE_CORE BIT(0)
-#define IMX_COMPOSITE_BUS BIT(1)
+#define IMX_COMPOSITE_CORE BIT(0)
+#define IMX_COMPOSITE_BUS BIT(1)
+#define IMX_COMPOSITE_FW_MANAGED BIT(2)
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
@@ -567,6 +568,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
ARRAY_SIZE(parent_names), reg, 0, \
flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
+ flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
+#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
+ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
+
+#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
+ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+
#define __imx8m_clk_composite(name, parent_names, reg, flags) \
to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
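
Taken together, the new IMX_COMPOSITE_FW_MANAGED plumbing reduces to a flag decision: firmware-managed composites must re-read the hardware on every rate query and must not demand a gated clock for reparenting, since TF-A switches them behind the kernel's back. A compressed sketch of that decision; the bit values are illustrative, and the real fw_managed macros also OR in CLK_SET_RATE_NO_REPARENT and CLK_OPS_PARENT_ENABLE:

#define CLK_SET_PARENT_GATE	 (1UL << 0)	/* bit values illustrative */
#define CLK_GET_RATE_NOCACHE	 (1UL << 1)
#define IMX_COMPOSITE_FW_MANAGED (1UL << 2)

/* Mirror of the two call sites: the fw_managed macro ORs in
 * GET_RATE_NOCACHE, and the composite builder withholds
 * SET_PARENT_GATE for firmware-managed clocks. */
static unsigned long resolve_flags(unsigned long flags, unsigned long cflags)
{
	if (cflags & IMX_COMPOSITE_FW_MANAGED)
		flags |= CLK_GET_RATE_NOCACHE;
	else
		flags |= CLK_SET_PARENT_GATE;
	return flags;
}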
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 886e2d9fced5..439b7c8d0d07 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -362,41 +362,36 @@ config COMMON_CLK_MT8167
config COMMON_CLK_MT8167_AUDSYS
bool "Clock driver for MediaTek MT8167 audsys"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK
+ depends on COMMON_CLK_MT8167
+ default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 audsys clocks.
config COMMON_CLK_MT8167_IMGSYS
bool "Clock driver for MediaTek MT8167 imgsys"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK
+ depends on COMMON_CLK_MT8167
+ default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 imgsys clocks.
config COMMON_CLK_MT8167_MFGCFG
bool "Clock driver for MediaTek MT8167 mfgcfg"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK
+ depends on COMMON_CLK_MT8167
+ default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 mfgcfg clocks.
config COMMON_CLK_MT8167_MMSYS
bool "Clock driver for MediaTek MT8167 mmsys"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK
+ depends on COMMON_CLK_MT8167
+ default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 mmsys clocks.
config COMMON_CLK_MT8167_VDECSYS
bool "Clock driver for MediaTek MT8167 vdecsys"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK
+ depends on COMMON_CLK_MT8167
+ default COMMON_CLK_MT8167
help
This driver supports MediaTek MT8167 vdecsys clocks.
@@ -500,6 +495,86 @@ config COMMON_CLK_MT8183_VENCSYS
help
This driver supports MediaTek MT8183 vencsys clocks.
+config COMMON_CLK_MT8192
+ bool "Clock driver for MediaTek MT8192"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARM64
+ help
+ This driver supports MediaTek MT8192 basic clocks.
+
+config COMMON_CLK_MT8192_AUDSYS
+ bool "Clock driver for MediaTek MT8192 audsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 audsys clocks.
+
+config COMMON_CLK_MT8192_CAMSYS
+ bool "Clock driver for MediaTek MT8192 camsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 camsys and camsys_raw clocks.
+
+config COMMON_CLK_MT8192_IMGSYS
+ bool "Clock driver for MediaTek MT8192 imgsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 imgsys and imgsys2 clocks.
+
+config COMMON_CLK_MT8192_IMP_IIC_WRAP
+ bool "Clock driver for MediaTek MT8192 imp_iic_wrap"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 imp_iic_wrap clocks.
+
+config COMMON_CLK_MT8192_IPESYS
+ bool "Clock driver for MediaTek MT8192 ipesys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 ipesys clocks.
+
+config COMMON_CLK_MT8192_MDPSYS
+ bool "Clock driver for MediaTek MT8192 mdpsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 mdpsys clocks.
+
+config COMMON_CLK_MT8192_MFGCFG
+ bool "Clock driver for MediaTek MT8192 mfgcfg"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 mfgcfg clocks.
+
+config COMMON_CLK_MT8192_MMSYS
+ bool "Clock driver for MediaTek MT8192 mmsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 mmsys clocks.
+
+config COMMON_CLK_MT8192_MSDC
+ bool "Clock driver for MediaTek MT8192 msdc"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 msdc and msdc_top clocks.
+
+config COMMON_CLK_MT8192_SCP_ADSP
+ bool "Clock driver for MediaTek MT8192 scp_adsp"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 scp_adsp clocks.
+
+config COMMON_CLK_MT8192_VDECSYS
+ bool "Clock driver for MediaTek MT8192 vdecsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 vdecsys and vdecsys_soc clocks.
+
+config COMMON_CLK_MT8192_VENCSYS
+ bool "Clock driver for MediaTek MT8192 vencsys"
+ depends on COMMON_CLK_MT8192
+ help
+ This driver supports MediaTek MT8192 vencsys clocks.
+
config COMMON_CLK_MT8516
bool "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 3b0c2be73824..15bc045f0b71 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -67,5 +67,18 @@ obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8192) += clk-mt8192.o
+obj-$(CONFIG_COMMON_CLK_MT8192_AUDSYS) += clk-mt8192-aud.o
+obj-$(CONFIG_COMMON_CLK_MT8192_CAMSYS) += clk-mt8192-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8192_IMGSYS) += clk-mt8192-img.o
+obj-$(CONFIG_COMMON_CLK_MT8192_IMP_IIC_WRAP) += clk-mt8192-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8192_IPESYS) += clk-mt8192-ipe.o
+obj-$(CONFIG_COMMON_CLK_MT8192_MDPSYS) += clk-mt8192-mdp.o
+obj-$(CONFIG_COMMON_CLK_MT8192_MFGCFG) += clk-mt8192-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8192_MMSYS) += clk-mt8192-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8192_MSDC) += clk-mt8192-msdc.o
+obj-$(CONFIG_COMMON_CLK_MT8192_SCP_ADSP) += clk-mt8192-scp_adsp.o
+obj-$(CONFIG_COMMON_CLK_MT8192_VDECSYS) += clk-mt8192-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8192_VENCSYS) += clk-mt8192-venc.o
obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 79fe09028742..61eeae4e60fb 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -84,7 +84,7 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
struct clk *clk;
struct regmap *regmap;
- regmap = syscon_node_to_regmap(node);
+ regmap = device_node_to_regmap(node);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %ld\n", node,
PTR_ERR(regmap));
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
new file mode 100644
index 000000000000..f28d56628045
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs aud0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs aud1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x4,
+};
+
+static const struct mtk_gate_regs aud2_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_AUD0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_AUD1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_AUD2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate aud_clks[] = {
+ /* AUD0 */
+ GATE_AUD0(CLK_AUD_AFE, "aud_afe", "audio_sel", 2),
+ GATE_AUD0(CLK_AUD_22M, "aud_22m", "aud_engen1_sel", 8),
+ GATE_AUD0(CLK_AUD_24M, "aud_24m", "aud_engen2_sel", 9),
+ GATE_AUD0(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", "aud_engen2_sel", 18),
+ GATE_AUD0(CLK_AUD_APLL_TUNER, "aud_apll_tuner", "aud_engen1_sel", 19),
+ GATE_AUD0(CLK_AUD_TDM, "aud_tdm", "aud_1_sel", 20),
+ GATE_AUD0(CLK_AUD_ADC, "aud_adc", "audio_sel", 24),
+ GATE_AUD0(CLK_AUD_DAC, "aud_dac", "audio_sel", 25),
+ GATE_AUD0(CLK_AUD_DAC_PREDIS, "aud_dac_predis", "audio_sel", 26),
+ GATE_AUD0(CLK_AUD_TML, "aud_tml", "audio_sel", 27),
+ GATE_AUD0(CLK_AUD_NLE, "aud_nle", "audio_sel", 28),
+ /* AUD1 */
+ GATE_AUD1(CLK_AUD_I2S1_B, "aud_i2s1_b", "audio_sel", 4),
+ GATE_AUD1(CLK_AUD_I2S2_B, "aud_i2s2_b", "audio_sel", 5),
+ GATE_AUD1(CLK_AUD_I2S3_B, "aud_i2s3_b", "audio_sel", 6),
+ GATE_AUD1(CLK_AUD_I2S4_B, "aud_i2s4_b", "audio_sel", 7),
+ GATE_AUD1(CLK_AUD_CONNSYS_I2S_ASRC, "aud_connsys_i2s_asrc", "audio_sel", 12),
+ GATE_AUD1(CLK_AUD_GENERAL1_ASRC, "aud_general1_asrc", "audio_sel", 13),
+ GATE_AUD1(CLK_AUD_GENERAL2_ASRC, "aud_general2_asrc", "audio_sel", 14),
+ GATE_AUD1(CLK_AUD_DAC_HIRES, "aud_dac_hires", "audio_h_sel", 15),
+ GATE_AUD1(CLK_AUD_ADC_HIRES, "aud_adc_hires", "audio_h_sel", 16),
+ GATE_AUD1(CLK_AUD_ADC_HIRES_TML, "aud_adc_hires_tml", "audio_h_sel", 17),
+ GATE_AUD1(CLK_AUD_ADDA6_ADC, "aud_adda6_adc", "audio_sel", 20),
+ GATE_AUD1(CLK_AUD_ADDA6_ADC_HIRES, "aud_adda6_adc_hires", "audio_h_sel", 21),
+ GATE_AUD1(CLK_AUD_3RD_DAC, "aud_3rd_dac", "audio_sel", 28),
+ GATE_AUD1(CLK_AUD_3RD_DAC_PREDIS, "aud_3rd_dac_predis", "audio_sel", 29),
+ GATE_AUD1(CLK_AUD_3RD_DAC_TML, "aud_3rd_dac_tml", "audio_sel", 30),
+ GATE_AUD1(CLK_AUD_3RD_DAC_HIRES, "aud_3rd_dac_hires", "audio_h_sel", 31),
+ /* AUD2 */
+ GATE_AUD2(CLK_AUD_I2S5_B, "aud_i2s5_b", "audio_sel", 0),
+ GATE_AUD2(CLK_AUD_I2S6_B, "aud_i2s6_b", "audio_sel", 1),
+ GATE_AUD2(CLK_AUD_I2S7_B, "aud_i2s7_b", "audio_sel", 2),
+ GATE_AUD2(CLK_AUD_I2S8_B, "aud_i2s8_b", "audio_sel", 3),
+ GATE_AUD2(CLK_AUD_I2S9_B, "aud_i2s9_b", "audio_sel", 4),
+};
+
+static int clk_mt8192_aud_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ if (r)
+ return r;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ of_clk_del_provider(node);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt8192_aud[] = {
+ { .compatible = "mediatek,mt8192-audsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8192_aud_drv = {
+ .probe = clk_mt8192_aud_probe,
+ .driver = {
+ .name = "clk-mt8192-aud",
+ .of_match_table = of_match_clk_mt8192_aud,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_aud_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
new file mode 100644
index 000000000000..fc74cd80b4b0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB13, "cam_larb13", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_LARB14, "cam_larb14", "cam_sel", 2),
+ GATE_CAM(CLK_CAM_CAM, "cam_cam", "cam_sel", 6),
+ GATE_CAM(CLK_CAM_CAMTG, "cam_camtg", "cam_sel", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "cam_sel", 8),
+ GATE_CAM(CLK_CAM_CAMSV0, "cam_camsv0", "cam_sel", 9),
+ GATE_CAM(CLK_CAM_CAMSV1, "cam_camsv1", "cam_sel", 10),
+ GATE_CAM(CLK_CAM_CAMSV2, "cam_camsv2", "cam_sel", 11),
+ GATE_CAM(CLK_CAM_CAMSV3, "cam_camsv3", "cam_sel", 12),
+ GATE_CAM(CLK_CAM_CCU0, "cam_ccu0", "cam_sel", 13),
+ GATE_CAM(CLK_CAM_CCU1, "cam_ccu1", "cam_sel", 14),
+ GATE_CAM(CLK_CAM_MRAW0, "cam_mraw0", "cam_sel", 15),
+ GATE_CAM(CLK_CAM_FAKE_ENG, "cam_fake_eng", "cam_sel", 17),
+ GATE_CAM(CLK_CAM_CCU_GALS, "cam_ccu_gals", "cam_sel", 18),
+ GATE_CAM(CLK_CAM_CAM2MM_GALS, "cam2mm_gals", "cam_sel", 19),
+};
+
+static const struct mtk_gate cam_rawa_clks[] = {
+ GATE_CAM(CLK_CAM_RAWA_LARBX, "cam_rawa_larbx", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_RAWA_CAM, "cam_rawa_cam", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_RAWA_CAMTG, "cam_rawa_camtg", "cam_sel", 2),
+};
+
+static const struct mtk_gate cam_rawb_clks[] = {
+ GATE_CAM(CLK_CAM_RAWB_LARBX, "cam_rawb_larbx", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_RAWB_CAM, "cam_rawb_cam", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_RAWB_CAMTG, "cam_rawb_camtg", "cam_sel", 2),
+};
+
+static const struct mtk_gate cam_rawc_clks[] = {
+ GATE_CAM(CLK_CAM_RAWC_LARBX, "cam_rawc_larbx", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_RAWC_CAM, "cam_rawc_cam", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_RAWC_CAMTG, "cam_rawc_camtg", "cam_sel", 2),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct mtk_clk_desc cam_rawa_desc = {
+ .clks = cam_rawa_clks,
+ .num_clks = ARRAY_SIZE(cam_rawa_clks),
+};
+
+static const struct mtk_clk_desc cam_rawb_desc = {
+ .clks = cam_rawb_clks,
+ .num_clks = ARRAY_SIZE(cam_rawb_clks),
+};
+
+static const struct mtk_clk_desc cam_rawc_desc = {
+ .clks = cam_rawc_clks,
+ .num_clks = ARRAY_SIZE(cam_rawc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_cam[] = {
+ {
+ .compatible = "mediatek,mt8192-camsys",
+ .data = &cam_desc,
+ }, {
+ .compatible = "mediatek,mt8192-camsys_rawa",
+ .data = &cam_rawa_desc,
+ }, {
+ .compatible = "mediatek,mt8192-camsys_rawb",
+ .data = &cam_rawb_desc,
+ }, {
+ .compatible = "mediatek,mt8192-camsys_rawc",
+ .data = &cam_rawc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-cam",
+ .of_match_table = of_match_clk_mt8192_cam,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_cam_drv);
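
clk-mt8192-cam.c above sets the table-driven shape the remaining MT8192 gate drivers repeat: every compatible points its .data at a mtk_clk_desc, and the shared mtk_clk_simple_probe() registers whatever table the match hands it. A reduced sketch of that dispatch, with simplified stand-ins for the mtk types:

#include <stddef.h>
#include <stdio.h>

struct gate { const char *name; };

struct clk_desc {			/* stand-in for struct mtk_clk_desc */
	const struct gate *clks;
	size_t num_clks;
};

static const struct gate cam_gates[] = { { "cam_larb13" }, { "cam_cam" } };
static const struct clk_desc cam_desc = { cam_gates, 2 };

/* One probe body serves every compatible: fetch .data, register its table. */
static int simple_probe(const struct clk_desc *desc)
{
	size_t i;

	if (!desc)
		return -1;	/* no match data wired up for this compatible */
	for (i = 0; i < desc->num_clks; i++)
		printf("register %s\n", desc->clks[i].name);
	return 0;
}

int main(void)
{
	return simple_probe(&cam_desc);
}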
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
new file mode 100644
index 000000000000..7ce3abe42577
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB9, "img_larb9", "img1_sel", 0),
+ GATE_IMG(CLK_IMG_LARB10, "img_larb10", "img1_sel", 1),
+ GATE_IMG(CLK_IMG_DIP, "img_dip", "img1_sel", 2),
+ GATE_IMG(CLK_IMG_GALS, "img_gals", "img1_sel", 12),
+};
+
+static const struct mtk_gate img2_clks[] = {
+ GATE_IMG(CLK_IMG2_LARB11, "img2_larb11", "img1_sel", 0),
+ GATE_IMG(CLK_IMG2_LARB12, "img2_larb12", "img1_sel", 1),
+ GATE_IMG(CLK_IMG2_MFB, "img2_mfb", "img1_sel", 6),
+ GATE_IMG(CLK_IMG2_WPE, "img2_wpe", "img1_sel", 7),
+ GATE_IMG(CLK_IMG2_MSS, "img2_mss", "img1_sel", 8),
+ GATE_IMG(CLK_IMG2_GALS, "img2_gals", "img1_sel", 12),
+};
+
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
+
+static const struct mtk_clk_desc img2_desc = {
+ .clks = img2_clks,
+ .num_clks = ARRAY_SIZE(img2_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_img[] = {
+ {
+ .compatible = "mediatek,mt8192-imgsys",
+ .data = &img_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imgsys2",
+ .data = &img2_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_img_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-img",
+ .of_match_table = of_match_clk_mt8192_img,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
new file mode 100644
index 000000000000..700356ac6a58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_OPS_PARENT_ENABLE)
+
+static const struct mtk_gate imp_iic_wrap_c_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_I2C10, "imp_iic_wrap_c_i2c10", "infra_i2c0", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_I2C11, "imp_iic_wrap_c_i2c11", "infra_i2c0", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_I2C12, "imp_iic_wrap_c_i2c12", "infra_i2c0", 2),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_C_I2C13, "imp_iic_wrap_c_i2c13", "infra_i2c0", 3),
+};
+
+static const struct mtk_gate imp_iic_wrap_e_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_E_I2C3, "imp_iic_wrap_e_i2c3", "infra_i2c0", 0),
+};
+
+static const struct mtk_gate imp_iic_wrap_n_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_N_I2C0, "imp_iic_wrap_n_i2c0", "infra_i2c0", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_N_I2C6, "imp_iic_wrap_n_i2c6", "infra_i2c0", 1),
+};
+
+static const struct mtk_gate imp_iic_wrap_s_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C7, "imp_iic_wrap_s_i2c7", "infra_i2c0", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C8, "imp_iic_wrap_s_i2c8", "infra_i2c0", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C9, "imp_iic_wrap_s_i2c9", "infra_i2c0", 2),
+};
+
+static const struct mtk_gate imp_iic_wrap_w_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C5, "imp_iic_wrap_w_i2c5", "infra_i2c0", 0),
+};
+
+static const struct mtk_gate imp_iic_wrap_ws_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_WS_I2C1, "imp_iic_wrap_ws_i2c1", "infra_i2c0", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_WS_I2C2, "imp_iic_wrap_ws_i2c2", "infra_i2c0", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_WS_I2C4, "imp_iic_wrap_ws_i2c4", "infra_i2c0", 2),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_c_desc = {
+ .clks = imp_iic_wrap_c_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_c_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_e_desc = {
+ .clks = imp_iic_wrap_e_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_e_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_n_desc = {
+ .clks = imp_iic_wrap_n_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_n_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_s_desc = {
+ .clks = imp_iic_wrap_s_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_s_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_w_desc = {
+ .clks = imp_iic_wrap_w_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_w_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_ws_desc = {
+ .clks = imp_iic_wrap_ws_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_ws_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_imp_iic_wrap[] = {
+ {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_c",
+ .data = &imp_iic_wrap_c_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_e",
+ .data = &imp_iic_wrap_e_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_n",
+ .data = &imp_iic_wrap_n_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_s",
+ .data = &imp_iic_wrap_s_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_w",
+ .data = &imp_iic_wrap_w_desc,
+ }, {
+ .compatible = "mediatek,mt8192-imp_iic_wrap_ws",
+ .data = &imp_iic_wrap_ws_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8192_imp_iic_wrap,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_imp_iic_wrap_drv);
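
Unlike the plain GATE_MTK() users elsewhere in this series, the imp_iic_wrap
gates go through GATE_MTK_FLAGS() with CLK_OPS_PARENT_ENABLE, since the
wrapper's gate registers are only accessible while the parent bus clock
("infra_i2c0") is running. A rough, hedged sketch of the behaviour that flag
requests from the clk core (the real logic lives in drivers/clk/clk.c):

	/* parent is forced on around the gate's own clk_ops callback */
	clk_core_prepare_enable(core->parent);
	core->ops->enable(core->hw);		/* register access is now safe */
	clk_core_disable_unprepare(core->parent);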
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
new file mode 100644
index 000000000000..730d91b64b3f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs ipe_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipe_clks[] = {
+ GATE_IPE(CLK_IPE_LARB19, "ipe_larb19", "ipe_sel", 0),
+ GATE_IPE(CLK_IPE_LARB20, "ipe_larb20", "ipe_sel", 1),
+ GATE_IPE(CLK_IPE_SMI_SUBCOM, "ipe_smi_subcom", "ipe_sel", 2),
+ GATE_IPE(CLK_IPE_FD, "ipe_fd", "ipe_sel", 3),
+ GATE_IPE(CLK_IPE_FE, "ipe_fe", "ipe_sel", 4),
+ GATE_IPE(CLK_IPE_RSC, "ipe_rsc", "ipe_sel", 5),
+ GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "ipe_sel", 6),
+ GATE_IPE(CLK_IPE_GALS, "ipe_gals", "ipe_sel", 8),
+};
+
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_ipe[] = {
+ {
+ .compatible = "mediatek,mt8192-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_ipe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-ipe",
+ .of_match_table = of_match_clk_mt8192_ipe,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_ipe_drv);
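
Most gates in this series follow the three-register scheme visible in
ipe_cg_regs: a write-one-to-set register, a write-one-to-clear register and a
status register, so a gate can be flipped without a read-modify-write cycle.
A hedged sketch of that discipline, simplified from the setclr ops in
drivers/clk/mediatek/clk-gate.c (function names here are illustrative; for
these non-inverted ops a set bit means "gated"):

static void mtk_gate_off(struct regmap *rm, const struct mtk_gate_regs *r, u8 shift)
{
	regmap_write(rm, r->set_ofs, BIT(shift));	/* set the bit: clock gated */
}

static void mtk_gate_on(struct regmap *rm, const struct mtk_gate_regs *r, u8 shift)
{
	regmap_write(rm, r->clr_ofs, BIT(shift));	/* clear the bit: clock runs */
}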
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
new file mode 100644
index 000000000000..93c87ae2f332
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MDP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_RDMA0, "mdp_mdp_rdma0", "mdp_sel", 0),
+ GATE_MDP0(CLK_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp_sel", 1),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp_sel", 2),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC1, "mdp_img_dl_async1", "mdp_sel", 3),
+ GATE_MDP0(CLK_MDP_RDMA1, "mdp_mdp_rdma1", "mdp_sel", 4),
+ GATE_MDP0(CLK_MDP_TDSHP1, "mdp_mdp_tdshp1", "mdp_sel", 5),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp_sel", 6),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp_sel", 7),
+ GATE_MDP0(CLK_MDP_WROT0, "mdp_mdp_wrot0", "mdp_sel", 8),
+ GATE_MDP0(CLK_MDP_RSZ0, "mdp_mdp_rsz0", "mdp_sel", 9),
+ GATE_MDP0(CLK_MDP_HDR0, "mdp_mdp_hdr0", "mdp_sel", 10),
+ GATE_MDP0(CLK_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp_sel", 11),
+ GATE_MDP0(CLK_MDP_WROT1, "mdp_mdp_wrot1", "mdp_sel", 12),
+ GATE_MDP0(CLK_MDP_RSZ1, "mdp_mdp_rsz1", "mdp_sel", 13),
+ GATE_MDP0(CLK_MDP_HDR1, "mdp_mdp_hdr1", "mdp_sel", 14),
+ GATE_MDP0(CLK_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp_sel", 15),
+ GATE_MDP0(CLK_MDP_AAL0, "mdp_mdp_aal0", "mdp_sel", 16),
+ GATE_MDP0(CLK_MDP_AAL1, "mdp_mdp_aal1", "mdp_sel", 17),
+ GATE_MDP0(CLK_MDP_COLOR0, "mdp_mdp_color0", "mdp_sel", 18),
+ GATE_MDP0(CLK_MDP_COLOR1, "mdp_mdp_color1", "mdp_sel", 19),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_IMG_DL_RELAY0_ASYNC0, "mdp_img_dl_relay0_async0", "mdp_sel", 0),
+ GATE_MDP1(CLK_MDP_IMG_DL_RELAY1_ASYNC1, "mdp_img_dl_relay1_async1", "mdp_sel", 8),
+};
+
+static const struct mtk_clk_desc mdp_desc = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_mdp[] = {
+ {
+ .compatible = "mediatek,mt8192-mdpsys",
+ .data = &mdp_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_mdp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-mdp",
+ .of_match_table = of_match_clk_mt8192_mdp,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_mdp_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
new file mode 100644
index 000000000000..3bbc7469f0e4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_pll_sel", 0),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_mfg[] = {
+ {
+ .compatible = "mediatek,mt8192-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-mfg",
+ .of_match_table = of_match_clk_mt8192_mfg,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
new file mode 100644
index 000000000000..4a0b4c4bc06a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm2_cg_regs = {
+ .set_ofs = 0x1a4,
+ .clr_ofs = 0x1a8,
+ .sta_ofs = 0x1a0,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp_sel", 0),
+ GATE_MM0(CLK_MM_DISP_CONFIG, "mm_disp_config", "disp_sel", 1),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "disp_sel", 2),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "disp_sel", 3),
+ GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "disp_sel", 4),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp_sel", 5),
+ GATE_MM0(CLK_MM_DISP_UFBC_WDMA0, "mm_disp_ufbc_wdma0", "disp_sel", 6),
+ GATE_MM0(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "disp_sel", 7),
+ GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp_sel", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp_sel", 9),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp_sel", 10),
+ GATE_MM0(CLK_MM_SMI_INFRA, "mm_smi_infra", "disp_sel", 11),
+ GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp_sel", 12),
+ GATE_MM0(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp_sel", 13),
+ GATE_MM0(CLK_MM_DISP_DSC_WRAP0, "mm_disp_dsc_wrap0", "disp_sel", 14),
+ GATE_MM0(CLK_MM_DSI0, "mm_dsi0", "disp_sel", 15),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp_sel", 16),
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "disp_sel", 17),
+ GATE_MM0(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp_sel", 18),
+ GATE_MM0(CLK_MM_DISP_FAKE_ENG1, "mm_disp_fake_eng1", "disp_sel", 19),
+ GATE_MM0(CLK_MM_MDP_TDSHP4, "mm_mdp_tdshp4", "disp_sel", 20),
+ GATE_MM0(CLK_MM_MDP_RSZ4, "mm_mdp_rsz4", "disp_sel", 21),
+ GATE_MM0(CLK_MM_MDP_AAL4, "mm_mdp_aal4", "disp_sel", 22),
+ GATE_MM0(CLK_MM_MDP_HDR4, "mm_mdp_hdr4", "disp_sel", 23),
+ GATE_MM0(CLK_MM_MDP_RDMA4, "mm_mdp_rdma4", "disp_sel", 24),
+ GATE_MM0(CLK_MM_MDP_COLOR4, "mm_mdp_color4", "disp_sel", 25),
+ GATE_MM0(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp_sel", 26),
+ GATE_MM0(CLK_MM_SMI_GALS, "mm_smi_gals", "disp_sel", 27),
+ GATE_MM0(CLK_MM_DISP_OVL2_2L, "mm_disp_ovl2_2l", "disp_sel", 28),
+ GATE_MM0(CLK_MM_DISP_RDMA4, "mm_disp_rdma4", "disp_sel", 29),
+ GATE_MM0(CLK_MM_DISP_DPI0, "mm_disp_dpi0", "disp_sel", 30),
+ /* MM1 */
+ GATE_MM1(CLK_MM_SMI_IOMMU, "mm_smi_iommu", "disp_sel", 0),
+ /* MM2 */
+ GATE_MM2(CLK_MM_DSI_DSI0, "mm_dsi_dsi0", "disp_sel", 0),
+ GATE_MM2(CLK_MM_DPI_DPI0, "mm_dpi_dpi0", "dpi_sel", 8),
+ GATE_MM2(CLK_MM_26MHZ, "mm_26mhz", "clk26m", 24),
+ GATE_MM2(CLK_MM_32KHZ, "mm_32khz", "clk32k", 25),
+};
+
+static int clk_mt8192_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static struct platform_driver clk_mt8192_mm_drv = {
+ .probe = clk_mt8192_mm_probe,
+ .driver = {
+ .name = "clk-mt8192-mm",
+ },
+};
+
+builtin_platform_driver(clk_mt8192_mm_drv);
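
The mm driver is the one exception to the mtk_clk_simple_probe() pattern in
this series: it has no of_match_table and reads the clock node from
dev->parent->of_node, because on MediaTek SoCs the display clocks are
registered from a child platform device spawned by the mmsys driver. A hedged
sketch of the assumed parent-side hookup, mirroring how
drivers/soc/mediatek/mtk-mmsys.c instantiates clock children for other SoCs
(mmsys_pdev and clks_pdev are illustrative names):

	clks_pdev = platform_device_register_data(&mmsys_pdev->dev,
						  "clk-mt8192-mm",
						  PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(clks_pdev))
		return PTR_ERR(clks_pdev);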
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
new file mode 100644
index 000000000000..87c3b79b79cf
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs msdc_cg_regs = {
+ .set_ofs = 0xb4,
+ .clr_ofs = 0xb4,
+ .sta_ofs = 0xb4,
+};
+
+static const struct mtk_gate_regs msdc_top_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MSDC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &msdc_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_MSDC_TOP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &msdc_top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate msdc_clks[] = {
+ GATE_MSDC(CLK_MSDC_AXI_WRAP, "msdc_axi_wrap", "axi_sel", 22),
+};
+
+static const struct mtk_gate msdc_top_clks[] = {
+ GATE_MSDC_TOP(CLK_MSDC_TOP_AES_0P, "msdc_top_aes_0p", "aes_msdcfde_sel", 0),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_0P, "msdc_top_src_0p", "infra_msdc0_src", 1),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_1P, "msdc_top_src_1p", "infra_msdc1_src", 2),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_2P, "msdc_top_src_2p", "infra_msdc2_src", 3),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_P_MSDC0, "msdc_top_p_msdc0", "axi_sel", 4),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_P_MSDC1, "msdc_top_p_msdc1", "axi_sel", 5),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_P_MSDC2, "msdc_top_p_msdc2", "axi_sel", 6),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_P_CFG, "msdc_top_p_cfg", "axi_sel", 7),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_AXI, "msdc_top_axi", "axi_sel", 8),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_H_MST_0P, "msdc_top_h_mst_0p", "infra_msdc0", 9),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_H_MST_1P, "msdc_top_h_mst_1p", "infra_msdc1", 10),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_H_MST_2P, "msdc_top_h_mst_2p", "infra_msdc2", 11),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_MEM_OFF_DLY_26M, "msdc_top_mem_off_dly_26m", "clk26m", 12),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_32K, "msdc_top_32k", "clk32k", 13),
+ GATE_MSDC_TOP(CLK_MSDC_TOP_AHB2AXI_BRG_AXI, "msdc_top_ahb2axi_brg_axi", "axi_sel", 14),
+};
+
+static const struct mtk_clk_desc msdc_desc = {
+ .clks = msdc_clks,
+ .num_clks = ARRAY_SIZE(msdc_clks),
+};
+
+static const struct mtk_clk_desc msdc_top_desc = {
+ .clks = msdc_top_clks,
+ .num_clks = ARRAY_SIZE(msdc_top_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_msdc[] = {
+ {
+ .compatible = "mediatek,mt8192-msdc",
+ .data = &msdc_desc,
+ }, {
+ .compatible = "mediatek,mt8192-msdc_top",
+ .data = &msdc_top_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_msdc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-msdc",
+ .of_match_table = of_match_clk_mt8192_msdc,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_msdc_drv);
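
Note that msdc_cg_regs and msdc_top_cg_regs point set_ofs, clr_ofs and sta_ofs
at the same offset: these blocks have a single read-write gate register, so
the driver picks the no_setclr ops, and the _inv suffix flips the polarity so
that a set bit means the clock is running. A hedged sketch of that fallback,
simplified from drivers/clk/mediatek/clk-gate.c (illustrative names):

static void mtk_gate_on_inv(struct regmap *rm, u32 ofs, u8 shift)
{
	regmap_update_bits(rm, ofs, BIT(shift), BIT(shift));	/* 1 = running */
}

static void mtk_gate_off_inv(struct regmap *rm, u32 ofs, u8 shift)
{
	regmap_update_bits(rm, ofs, BIT(shift), 0);		/* 0 = gated */
}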
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
new file mode 100644
index 000000000000..58725d79dd13
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs scp_adsp_cg_regs = {
+ .set_ofs = 0x180,
+ .clr_ofs = 0x180,
+ .sta_ofs = 0x180,
+};
+
+#define GATE_SCP_ADSP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &scp_adsp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate scp_adsp_clks[] = {
+ GATE_SCP_ADSP(CLK_SCP_ADSP_AUDIODSP, "scp_adsp_audiodsp", "adsp_sel", 0),
+};
+
+static const struct mtk_clk_desc scp_adsp_desc = {
+ .clks = scp_adsp_clks,
+ .num_clks = ARRAY_SIZE(scp_adsp_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_scp_adsp[] = {
+ {
+ .compatible = "mediatek,mt8192-scp_adsp",
+ .data = &scp_adsp_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_scp_adsp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-scp_adsp",
+ .of_match_table = of_match_clk_mt8192_scp_adsp,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_scp_adsp_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
new file mode 100644
index 000000000000..b1d95cfbf22a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vdec2_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_VDEC, "vdec_vdec", "vdec_sel", 0),
+ GATE_VDEC0(CLK_VDEC_ACTIVE, "vdec_active", "vdec_sel", 4),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LAT, "vdec_lat", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_LAT_ACTIVE, "vdec_lat_active", "vdec_sel", 4),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_LARB1, "vdec_larb1", "vdec_sel", 0),
+};
+
+static const struct mtk_gate vdec_soc_clks[] = {
+ /* VDEC_SOC0 */
+ GATE_VDEC0(CLK_VDEC_SOC_VDEC, "vdec_soc_vdec", "vdec_sel", 0),
+ GATE_VDEC0(CLK_VDEC_SOC_VDEC_ACTIVE, "vdec_soc_vdec_active", "vdec_sel", 4),
+ /* VDEC_SOC1 */
+ GATE_VDEC1(CLK_VDEC_SOC_LAT, "vdec_soc_lat", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_SOC_LAT_ACTIVE, "vdec_soc_lat_active", "vdec_sel", 4),
+ /* VDEC_SOC2 */
+ GATE_VDEC2(CLK_VDEC_SOC_LARB1, "vdec_soc_larb1", "vdec_sel", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct mtk_clk_desc vdec_soc_desc = {
+ .clks = vdec_soc_clks,
+ .num_clks = ARRAY_SIZE(vdec_soc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_vdec[] = {
+ {
+ .compatible = "mediatek,mt8192-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ .compatible = "mediatek,mt8192-vdecsys_soc",
+ .data = &vdec_soc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-vdec",
+ .of_match_table = of_match_clk_mt8192_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_vdec_drv);
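
The vdec blocks keep separate set and clr registers but read status back from
the set offset (sta_ofs aliases set_ofs) with inverted polarity, hence
mtk_clk_gate_ops_setclr_inv. In a hedged sketch of those ops:

	regmap_write(rm, r->set_ofs, BIT(shift));	/* inverted: enables the clock */
	regmap_write(rm, r->clr_ofs, BIT(shift));	/* disables it again */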
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
new file mode 100644
index 000000000000..c0d867bff09e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "venc_sel", 0),
+ GATE_VENC(CLK_VENC_SET1_VENC, "venc_set1_venc", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_SET2_JPGENC, "venc_set2_jpgenc", "venc_sel", 8),
+ GATE_VENC(CLK_VENC_SET5_GALS, "venc_set5_gals", "venc_sel", 28),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8192_venc[] = {
+ {
+ .compatible = "mediatek,mt8192-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8192_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8192-venc",
+ .of_match_table = of_match_clk_mt8192_venc,
+ },
+};
+
+builtin_platform_driver(clk_mt8192_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
new file mode 100644
index 000000000000..cbc7c6dbe0f4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -0,0 +1,1326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8192-clk.h>
+
+static DEFINE_SPINLOCK(mt8192_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_ULPOSC, "ulposc", NULL, 260000000),
+};
+
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_CSW_F26M_D2, "csw_f26m_d2", "clk26m", 1, 2),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D16, "mainpll_d4_d16", "mainpll_d4", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll_d6", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1, 8),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1, 8),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D6_D2, "mmpll_d6_d2", "mmpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7),
+ FACTOR(CLK_TOP_MMPLL_D9, "mmpll_d9", "mmpll", 1, 9),
+ FACTOR(CLK_TOP_APUPLL, "apupll_ck", "apupll", 1, 2),
+ FACTOR(CLK_TOP_NPUPLL, "npupll_ck", "npupll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1, 16),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D16, "osc_d16", "ulposc", 1, 16),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_ADSPPLL, "adsppll_ck", "adsppll", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D2, "univpll_192m_d2", "univpll_192m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "osc_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "mainpll_d7_d4",
+ "clk32k"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d5",
+ "mainpll_d6_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const bus_aximem_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5_d2"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "mainpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d4",
+ "tvdpll_ck",
+ "univpll_d4",
+ "mmpll_d5_d2"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "univpll_d4",
+ "tvdpll_ck",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "mmpll_d5_d2"
+};
+
+static const char * const img2_parents[] = {
+ "clk26m",
+ "univpll_d4",
+ "tvdpll_ck",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "mmpll_d5_d2"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "mmpll_d5_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "univpll_d5_d2",
+ "mmpll_d6_d2"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d6_d2"
+};
+
+static const char * const ccu_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d6",
+ "mainpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "mmpll_d6_d2",
+ "mmpll_d5_d2",
+ "univpll_d5",
+ "univpll_d6_d2"
+};
+
+static const char * const dsp7_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mmpll_d6",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d5_d2"
+};
+
+static const char * const mfg_pll_parents[] = {
+ "mfg_ref_sel",
+ "mfgpll"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg2_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg3_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg4_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg5_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const camtg6_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "csw_f26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d6_d4",
+ "msdcpll_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d7_d8",
+ "mainpll_d4_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const pwrap_ulposc_parents[] = {
+ "osc_d10",
+ "clk26m",
+ "osc_d4",
+ "osc_d8",
+ "osc_d16"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const dpi_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "tvdpll_d16"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "osc_d2",
+ "osc_d4",
+ "osc_d16"
+};
+
+static const char * const usb_top_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const ssusb_xhci_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const seninf1_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const seninf2_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const seninf3_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "univpll_192m_d2",
+ "mainpll_d6_d4"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4_d4",
+ "univpll_d4_d2",
+ "univpll_d6"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2",
+ "msdcpll_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5_d2",
+ "univpll_d4_d4",
+ "univpll_d4",
+ "univpll_d6",
+ "ulposc",
+ "adsppll_ck"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "mainpll_d6",
+ "mainpll_d4_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mmpll_d7",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mmpll_d6",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d4_d4",
+ "mainpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d5_d2",
+ "mainpll_d5"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "univpll_192m_d2",
+ "univpll_d5_d4",
+ "mainpll_d5",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "mmpll_d7",
+ "mmpll_d6",
+ "univpll_d5",
+ "mainpll_d4",
+ "univpll_d4",
+ "univpll_d6"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "univpll_d6_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll_d4_d8"
+};
+
+static const char * const audio_h_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "apll1_ck",
+ "apll2_ck"
+};
+
+static const char * const spmi_mst_parents[] = {
+ "clk26m",
+ "csw_f26m_d2",
+ "osc_d8",
+ "osc_d10",
+ "osc_d16",
+ "osc_d20",
+ "clk32k"
+};
+
+static const char * const aes_msdcfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4_d4",
+ "univpll_d4_d2",
+ "univpll_d6"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8",
+ "univpll_d5_d8"
+};
+
+static const char * const apll_i2s0_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s1_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s2_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s3_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s4_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s5_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s6_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s7_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s8_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s9_m_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+/*
+ * CRITICAL CLOCKS:
+ * axi_sel is the main bus clock of the whole SoC.
+ * spm_sel is the clock of the always-on co-processor.
+ * bus_aximem_sel is the clock of the bus that accesses the EMI.
+ */
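+/*
+ * A hedged reading of the MUX_GATE_CLR_SET_UPD() arguments below (the
+ * macro comes from clk-mux.h): mux config register, its set and clr
+ * companions, field shift and width, the mux gate bit, then the update
+ * register and bit that latch a new selection.
+ */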
+static const struct mtk_mux top_mtk_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel",
+ axi_parents, 0x010, 0x014, 0x018, 0, 3, 7, 0x004, 0,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM_SEL, "spm_sel",
+ spm_parents, 0x010, 0x014, 0x018, 8, 2, 15, 0x004, 1,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel",
+ scp_parents, 0x010, 0x014, 0x018, 16, 3, 23, 0x004, 2),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_BUS_AXIMEM_SEL, "bus_aximem_sel",
+ bus_aximem_parents, 0x010, 0x014, 0x018, 24, 3, 31, 0x004, 3,
+ CLK_IS_CRITICAL),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_SEL, "disp_sel",
+ disp_parents, 0x020, 0x024, 0x028, 0, 4, 7, 0x004, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MDP_SEL, "mdp_sel",
+ mdp_parents, 0x020, 0x024, 0x028, 8, 4, 15, 0x004, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG1_SEL, "img1_sel",
+ img1_parents, 0x020, 0x024, 0x028, 16, 4, 23, 0x004, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG2_SEL, "img2_sel",
+ img2_parents, 0x020, 0x024, 0x028, 24, 4, 31, 0x004, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE_SEL, "ipe_sel",
+ ipe_parents, 0x030, 0x034, 0x038, 0, 4, 7, 0x004, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPE_SEL, "dpe_sel",
+ dpe_parents, 0x030, 0x034, 0x038, 8, 3, 15, 0x004, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM_SEL, "cam_sel",
+ cam_parents, 0x030, 0x034, 0x038, 16, 4, 23, 0x004, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU_SEL, "ccu_sel",
+ ccu_parents, 0x030, 0x034, 0x038, 24, 4, 31, 0x004, 11),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP7_SEL, "dsp7_sel",
+ dsp7_parents, 0x050, 0x054, 0x058, 0, 3, 7, 0x004, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_REF_SEL, "mfg_ref_sel",
+ mfg_ref_parents, 0x050, 0x054, 0x058, 16, 2, 23, 0x004, 18),
+ MUX_CLR_SET_UPD(CLK_TOP_MFG_PLL_SEL, "mfg_pll_sel",
+ mfg_pll_parents, 0x050, 0x054, 0x058, 18, 1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel",
+ camtg_parents, 0x050, 0x054, 0x058, 24, 3, 31, 0x004, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel",
+ camtg2_parents, 0x060, 0x064, 0x068, 0, 3, 7, 0x004, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel",
+ camtg3_parents, 0x060, 0x064, 0x068, 8, 3, 15, 0x004, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4_SEL, "camtg4_sel",
+ camtg4_parents, 0x060, 0x064, 0x068, 16, 3, 23, 0x004, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5_SEL, "camtg5_sel",
+ camtg5_parents, 0x060, 0x064, 0x068, 24, 3, 31, 0x004, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG6_SEL, "camtg6_sel",
+ camtg6_parents, 0x070, 0x074, 0x078, 0, 3, 7, 0x004, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel",
+ uart_parents, 0x070, 0x074, 0x078, 8, 1, 15, 0x004, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel",
+ spi_parents, 0x070, 0x074, 0x078, 16, 2, 23, 0x004, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel",
+ msdc50_0_h_parents, 0x070, 0x074, 0x078, 24, 2, 31, 0x004, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, 0x080, 0x084, 0x088, 0, 3, 7, 0x004, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, 0x080, 0x084, 0x088, 8, 3, 15, 0x004, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
+ msdc30_2_parents, 0x080, 0x084, 0x088, 16, 3, 23, 0x004, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel",
+ audio_parents, 0x080, 0x084, 0x088, 24, 2, 31, 0x008, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, 0x090, 0x094, 0x098, 0, 2, 7, 0x008, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC_SEL, "pwrap_ulposc_sel",
+ pwrap_ulposc_parents, 0x090, 0x094, 0x098, 8, 3, 15, 0x008, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel",
+ atb_parents, 0x090, 0x094, 0x098, 16, 2, 23, 0x008, 3),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI_SEL, "dpi_sel",
+ dpi_parents, 0x0a0, 0x0a4, 0x0a8, 0, 3, 7, 0x008, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM_SEL, "scam_sel",
+ scam_parents, 0x0a0, 0x0a4, 0x0a8, 8, 1, 15, 0x008, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel",
+ disp_pwm_parents, 0x0a0, 0x0a4, 0x0a8, 16, 3, 23, 0x008, 7),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_SEL, "usb_top_sel",
+ usb_top_parents, 0x0a0, 0x0a4, 0x0a8, 24, 2, 31, 0x008, 8),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_SEL, "ssusb_xhci_sel",
+ ssusb_xhci_parents, 0x0b0, 0x0b4, 0x0b8, 0, 2, 7, 0x008, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel",
+ i2c_parents, 0x0b0, 0x0b4, 0x0b8, 8, 2, 15, 0x008, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel",
+ seninf_parents, 0x0b0, 0x0b4, 0x0b8, 16, 3, 23, 0x008, 11),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1_SEL, "seninf1_sel",
+ seninf1_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31, 0x008, 12),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2_SEL, "seninf2_sel",
+ seninf2_parents, 0x0c0, 0x0c4, 0x0c8, 0, 3, 7, 0x008, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3_SEL, "seninf3_sel",
+ seninf3_parents, 0x0c0, 0x0c4, 0x0c8, 8, 3, 15, 0x008, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL_SEL, "tl_sel",
+ tl_parents, 0x0c0, 0x0c4, 0x0c8, 16, 2, 23, 0x008, 15),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel",
+ dxcc_parents, 0x0c0, 0x0c4, 0x0c8, 24, 2, 31, 0x008, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel",
+ aud_engen1_parents, 0x0d0, 0x0d4, 0x0d8, 0, 2, 7, 0x008, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel",
+ aud_engen2_parents, 0x0d0, 0x0d4, 0x0d8, 8, 2, 15, 0x008, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_UFSFDE_SEL, "aes_ufsfde_sel",
+ aes_ufsfde_parents, 0x0d0, 0x0d4, 0x0d8, 16, 3, 23, 0x008, 19),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS_SEL, "ufs_sel",
+ ufs_parents, 0x0d0, 0x0d4, 0x0d8, 24, 3, 31, 0x008, 20),
+ /* CLK_CFG_13 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel",
+ aud_1_parents, 0x0e0, 0x0e4, 0x0e8, 0, 1, 7, 0x008, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2_SEL, "aud_2_sel",
+ aud_2_parents, 0x0e0, 0x0e4, 0x0e8, 8, 1, 15, 0x008, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_SEL, "adsp_sel",
+ adsp_parents, 0x0e0, 0x0e4, 0x0e8, 16, 3, 23, 0x008, 23),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN_SEL, "dpmaif_main_sel",
+ dpmaif_main_parents, 0x0e0, 0x0e4, 0x0e8, 24, 3, 31, 0x008, 24),
+ /* CLK_CFG_14 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC_SEL, "venc_sel",
+ venc_parents, 0x0f0, 0x0f4, 0x0f8, 0, 4, 7, 0x008, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC_SEL, "vdec_sel",
+ vdec_parents, 0x0f0, 0x0f4, 0x0f8, 8, 4, 15, 0x008, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel",
+ camtm_parents, 0x0f0, 0x0f4, 0x0f8, 16, 2, 23, 0x008, 27),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel",
+ pwm_parents, 0x0f0, 0x0f4, 0x0f8, 24, 1, 31, 0x008, 28),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H_SEL, "audio_h_sel",
+ audio_h_parents, 0x100, 0x104, 0x108, 0, 2, 7, 0x008, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_MST_SEL, "spmi_mst_sel",
+ spmi_mst_parents, 0x100, 0x104, 0x108, 8, 3, 15, 0x008, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE_SEL, "aes_msdcfde_sel",
+ aes_msdcfde_parents, 0x100, 0x104, 0x108, 24, 3, 31, 0x00c, 1),
+ /* CLK_CFG_16 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SFLASH_SEL, "sflash_sel",
+ sflash_parents, 0x110, 0x114, 0x118, 8, 2, 15, 0x00c, 3),
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_AUDDIV_0 */
+ MUX(CLK_TOP_APLL_I2S0_M_SEL, "apll_i2s0_m_sel", apll_i2s0_m_parents, 0x320, 16, 1),
+ MUX(CLK_TOP_APLL_I2S1_M_SEL, "apll_i2s1_m_sel", apll_i2s1_m_parents, 0x320, 17, 1),
+ MUX(CLK_TOP_APLL_I2S2_M_SEL, "apll_i2s2_m_sel", apll_i2s2_m_parents, 0x320, 18, 1),
+ MUX(CLK_TOP_APLL_I2S3_M_SEL, "apll_i2s3_m_sel", apll_i2s3_m_parents, 0x320, 19, 1),
+ MUX(CLK_TOP_APLL_I2S4_M_SEL, "apll_i2s4_m_sel", apll_i2s4_m_parents, 0x320, 20, 1),
+ MUX(CLK_TOP_APLL_I2S5_M_SEL, "apll_i2s5_m_sel", apll_i2s5_m_parents, 0x320, 21, 1),
+ MUX(CLK_TOP_APLL_I2S6_M_SEL, "apll_i2s6_m_sel", apll_i2s6_m_parents, 0x320, 22, 1),
+ MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s7_m_parents, 0x320, 23, 1),
+ MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s8_m_parents, 0x320, 24, 1),
+ MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s9_m_parents, 0x320, 25, 1),
+};
+
+static const struct mtk_composite top_adj_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_m_sel", 0x320, 0, 0x328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_m_sel", 0x320, 1, 0x328, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_m_sel", 0x320, 2, 0x328, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_m_sel", 0x320, 3, 0x328, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_m_sel", 0x320, 4, 0x334, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4", 0x320, 5, 0x334, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll_i2s5_m_sel", 0x320, 6, 0x334, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll_i2s6_m_sel", 0x320, 7, 0x334, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV7, "apll12_div7", "apll_i2s7_m_sel", 0x320, 8, 0x338, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV8, "apll12_div8", "apll_i2s8_m_sel", 0x320, 9, 0x338, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV9, "apll12_div9", "apll_i2s9_m_sel", 0x320, 10, 0x338, 8, 16),
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_MIPID26M, "mipid26m", "clk26m", 16),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs infra4_cg_regs = {
+ .set_ofs = 0xd0,
+ .clr_ofs = 0xd4,
+ .sta_ofs = 0xd8,
+};
+
+static const struct mtk_gate_regs infra5_cg_regs = {
+ .set_ofs = 0xe0,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe8,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA1_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ GATE_INFRA1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA3(_id, _name, _parent, _shift) \
+ GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA5_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra5_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA5(_id, _name, _parent, _shift) \
+ GATE_INFRA5_FLAGS(_id, _name, _parent, _shift, 0)
+
+/*
+ * CRITICAL CLOCKS:
+ * infra_133m and infra_66m are the main peripheral bus clocks of the SoC.
+ * infra_device_apc and infra_device_apc_sync are for the device access
+ * permission control module.
+ */
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "pwrap_ulposc_sel", 0),
+ GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pwrap_ulposc_sel", 1),
+ GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md", "pwrap_ulposc_sel", 2),
+ GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn", "pwrap_ulposc_sel", 3),
+ GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scpsys", "scp_sel", 4),
+ GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej", "axi_sel", 5),
+ GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt", "axi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_GCE, "infra_gce", "axi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_GCE2, "infra_gce2", "axi_sel", 9),
+ GATE_INFRA0(CLK_INFRA_THERM, "infra_therm", "axi_sel", 10),
+ GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0", "i2c_sel", 11),
+ GATE_INFRA0(CLK_INFRA_AP_DMA_PSEUDO, "infra_ap_dma_pseudo", "axi_sel", 12),
+ GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2", "i2c_sel", 13),
+ GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3", "i2c_sel", 14),
+ GATE_INFRA0(CLK_INFRA_PWM_H, "infra_pwm_h", "axi_sel", 15),
+ GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1", "pwm_sel", 16),
+ GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2", "pwm_sel", 17),
+ GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3", "pwm_sel", 18),
+ GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4", "pwm_sel", 19),
+ GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", "pwm_sel", 21),
+ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0", "uart_sel", 22),
+ GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", "uart_sel", 23),
+ GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", "uart_sel", 24),
+ GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3", "uart_sel", 25),
+ GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m", "axi_sel", 27),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cq_dma_fpc", "axi_sel", 28),
+ GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif", "axi_sel", 31),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0", "spi_sel", 1),
+ GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0", "msdc50_0_h_sel", 2),
+ GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1", "msdc50_0_h_sel", 4),
+ GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2", "msdc50_0_h_sel", 5),
+ GATE_INFRA1(CLK_INFRA_MSDC0_SRC, "infra_msdc0_src", "msdc50_0_sel", 6),
+ GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu", "axi_sel", 8),
+ GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng", "axi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc", "clk26m", 10),
+ GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum", "axi_sel", 11),
+ GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap", "axi_sel", 12),
+ GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md", "axi_sel", 13),
+ GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md", "clk26m", 14),
+ GATE_INFRA1(CLK_INFRA_PCIE_TL_26M, "infra_pcie_tl_26m", "axi_sel", 15),
+ GATE_INFRA1(CLK_INFRA_MSDC1_SRC, "infra_msdc1_src", "msdc30_1_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC2_SRC, "infra_msdc2_src", "msdc30_2_sel", 17),
+ GATE_INFRA1(CLK_INFRA_PCIE_TL_96M, "infra_pcie_tl_96m", "tl_sel", 18),
+ GATE_INFRA1(CLK_INFRA_PCIE_PL_P_250M, "infra_pcie_pl_p_250m", "axi_sel", 19),
+ GATE_INFRA1_FLAGS(CLK_INFRA_DEVICE_APC, "infra_device_apc", "axi_sel", 20, CLK_IS_CRITICAL),
+ GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23),
+ GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys", "axi_sel", 24),
+ GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25),
+ GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26),
+ GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core", "dxcc_sel", 27),
+ GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao", "dxcc_sel", 28),
+ GATE_INFRA1(CLK_INFRA_DBG_TRACE, "infra_dbg_trace", "axi_sel", 29),
+ GATE_INFRA1(CLK_INFRA_DEVMPU_B, "infra_devmpu_b", "axi_sel", 30),
+ GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "clk26m", 31),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx", "clk26m", 0),
+ GATE_INFRA2(CLK_INFRA_SSUSB, "infra_ssusb", "usb_top_sel", 1),
+ GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disp_pwm", "axi_sel", 2),
+ GATE_INFRA2(CLK_INFRA_CLDMA_B, "infra_cldma_b", "axi_sel", 3),
+ GATE_INFRA2(CLK_INFRA_AUDIO_26M_B, "infra_audio_26m_b", "clk26m", 4),
+ GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_modem_temp_share", "clk26m", 5),
+ GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 6),
+ GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4", "i2c_sel", 7),
+ GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2", "spi_sel", 9),
+ GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3", "spi_sel", 10),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_SYS, "infra_unipro_sys", "ufs_sel", 11),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "clk26m", 12),
+ GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_B, "infra_ufs_mp_sap_b", "clk26m", 13),
+ GATE_INFRA2(CLK_INFRA_MD32_B, "infra_md32_b", "axi_sel", 14),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16),
+ GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18),
+ GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19),
+ GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20),
+ GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter", "i2c_sel", 21),
+ GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm", "i2c_sel", 22),
+ GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter", "i2c_sel", 23),
+ GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm", "i2c_sel", 24),
+ GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4", "spi_sel", 25),
+ GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5", "spi_sel", 26),
+ GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cq_dma", "axi_sel", 27),
+ GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs", "ufs_sel", 28),
+ GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde", "aes_ufsfde_sel", 29),
+ GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick", "ufs_sel", 30),
+ GATE_INFRA2(CLK_INFRA_SSUSB_XHCI, "infra_ssusb_xhci", "ssusb_xhci_sel", 31),
+ /* INFRA3 */
+ GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0),
+ GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1),
+ GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2),
+ GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
+ GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
+ GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_0_sel", 7),
+ GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0", "msdc50_0_sel", 8),
+ GATE_INFRA3(CLK_INFRA_CCIF5_AP, "infra_ccif5_ap", "axi_sel", 9),
+ GATE_INFRA3(CLK_INFRA_CCIF5_MD, "infra_ccif5_md", "axi_sel", 10),
+ GATE_INFRA3(CLK_INFRA_PCIE_TOP_H_133M, "infra_pcie_top_h_133m", "axi_sel", 11),
+ GATE_INFRA3(CLK_INFRA_FLASHIF_TOP_H_133M, "infra_flashif_top_h_133m", "axi_sel", 14),
+ GATE_INFRA3(CLK_INFRA_PCIE_PERI_26M, "infra_pcie_peri_26m", "axi_sel", 15),
+ GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap", "axi_sel", 16),
+ GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md", "axi_sel", 17),
+ GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap", "axi_sel", 18),
+ GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md", "axi_sel", 19),
+ GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m", "clk26m", 20),
+ GATE_INFRA3(CLK_INFRA_AES, "infra_aes", "axi_sel", 21),
+ GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7", "i2c_sel", 22),
+ GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8", "i2c_sel", 23),
+ GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc", "msdc50_0_sel", 24),
+ GATE_INFRA3_FLAGS(CLK_INFRA_DEVICE_APC_SYNC, "infra_device_apc_sync", "axi_sel", 25,
+ CLK_IS_CRITICAL),
+ GATE_INFRA3(CLK_INFRA_DPMAIF_MAIN, "infra_dpmaif_main", "dpmaif_main_sel", 26),
+ GATE_INFRA3(CLK_INFRA_PCIE_TL_32K, "infra_pcie_tl_32k", "axi_sel", 27),
+ GATE_INFRA3(CLK_INFRA_CCIF4_AP, "infra_ccif4_ap", "axi_sel", 28),
+ GATE_INFRA3(CLK_INFRA_CCIF4_MD, "infra_ccif4_md", "axi_sel", 29),
+ GATE_INFRA3(CLK_INFRA_SPI6, "infra_spi6", "spi_sel", 30),
+ GATE_INFRA3(CLK_INFRA_SPI7, "infra_spi7", "spi_sel", 31),
+ /* INFRA4 */
+ GATE_INFRA4(CLK_INFRA_AP_DMA, "infra_ap_dma", "infra_ap_dma_pseudo", 31),
+ /* INFRA5 */
+ GATE_INFRA5_FLAGS(CLK_INFRA_133M, "infra_133m", "axi_sel", 0, CLK_IS_CRITICAL),
+ GATE_INFRA5_FLAGS(CLK_INFRA_66M, "infra_66m", "axi_sel", 1, CLK_IS_CRITICAL),
+ GATE_INFRA5(CLK_INFRA_66M_PERI_BUS, "infra_66m_peri_bus", "axi_sel", 2),
+ GATE_INFRA5(CLK_INFRA_FREE_DCM_133M, "infra_free_dcm_133m", "axi_sel", 3),
+ GATE_INFRA5(CLK_INFRA_FREE_DCM_66M, "infra_free_dcm_66m", "axi_sel", 4),
+ GATE_INFRA5(CLK_INFRA_PERI_BUS_DCM_133M, "infra_peri_bus_dcm_133m", "axi_sel", 5),
+ GATE_INFRA5(CLK_INFRA_PERI_BUS_DCM_66M, "infra_peri_bus_dcm_66m", "axi_sel", 6),
+ GATE_INFRA5(CLK_INFRA_FLASHIF_PERI_26M, "infra_flashif_peri_26m", "axi_sel", 30),
+ GATE_INFRA5(CLK_INFRA_FLASHIF_SFLASH, "infra_flashif_fsflash", "axi_sel", 31),
+};
+
+static const struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = 0x20c,
+ .clr_ofs = 0x20c,
+ .sta_ofs = 0x20c,
+};
+
+#define GATE_PERI(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate peri_clks[] = {
+ GATE_PERI(CLK_PERI_PERIAXI, "peri_periaxi", "axi_sel", 31),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+ .set_ofs = 0x150,
+ .clr_ofs = 0x150,
+ .sta_ofs = 0x150,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ GATE_TOP(CLK_TOP_SSUSB_TOP_REF, "ssusb_top_ref", "clk26m", 24),
+ GATE_TOP(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 25),
+};
+
+#define MT8192_PLL_FMAX (3800UL * MHZ)
+#define MT8192_PLL_FMIN (1500UL * MHZ)
+#define MT8192_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcw_chg_reg, \
+ _en_reg, _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8192_PLL_FMAX, \
+ .fmin = MT8192_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8192_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .en_reg = _en_reg, \
+ .pll_en_bit = _pll_en_bit, \
+ }
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift) \
+ PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, 0, 0, 0)
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_MAINPLL, "mainpll", 0x0340, 0x034c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0344, 24, 0, 0, 0, 0x0344, 0),
+ PLL_B(CLK_APMIXED_UNIVPLL, "univpll", 0x0308, 0x0314, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x030c, 24, 0, 0, 0, 0x030c, 0),
+ PLL(CLK_APMIXED_USBPLL, "usbpll", 0x03c4, 0x03cc, 0x00000000,
+ 0, 0, 22, 0x03c4, 24, 0, 0, 0, 0x03c4, 0, 0x03c4, 0x03cc, 2),
+ PLL_B(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035c, 0x00000000,
+ 0, 0, 22, 0x0354, 24, 0, 0, 0, 0x0354, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0360, 0x036c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0364, 24, 0, 0, 0, 0x0364, 0),
+ PLL_B(CLK_APMIXED_ADSPPLL, "adsppll", 0x0370, 0x037c, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0374, 24, 0, 0, 0, 0x0374, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0268, 0x0274, 0x00000000,
+ 0, 0, 22, 0x026c, 24, 0, 0, 0, 0x026c, 0),
+ PLL_B(CLK_APMIXED_TVDPLL, "tvdpll", 0x0380, 0x038c, 0x00000000,
+ 0, 0, 22, 0x0384, 24, 0, 0, 0, 0x0384, 0),
+ PLL_B(CLK_APMIXED_APLL1, "apll1", 0x0318, 0x0328, 0x00000000,
+ 0, 0, 32, 0x031c, 24, 0x0040, 0x000c, 0, 0x0320, 0),
+ PLL_B(CLK_APMIXED_APLL2, "apll2", 0x032c, 0x033c, 0x00000000,
+ 0, 0, 32, 0x0330, 24, 0, 0, 0, 0x0334, 0),
+};
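+
+A note on the two new PLL fields used above: only usbpll passes non-zero values for them, and the mapping they produce in mtk_clk_register_pll() is worth spelling out. The comment below is derived from the usbpll entry above, not new behaviour:

/*
 * For usbpll: data->en_reg = 0x03cc (the PWR register) and
 * data->pll_en_bit = 2, so pll->en_addr = base + 0x03cc and the
 * enable bit becomes BIT(2). Every other PLL goes through PLL_B(),
 * which leaves en_reg/pll_en_bit at 0, and mtk_clk_register_pll()
 * then falls back to the legacy CON0 register and BIT(0).
 */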
+
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8192_top_init_early(struct device_node *node)
+{
+ int i;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!top_clk_data)
+ return;
+
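+ /* Mark all clks deferred; the early divs and the platform probe install the real ones */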
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen",
+ clk_mt8192_top_init_early);
+
+static int clk_mt8192_top_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node, &mt8192_clk_lock,
+ top_clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8192_clk_lock,
+ top_clk_data);
+ mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8192_clk_lock,
+ top_clk_data);
+ r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+static int clk_mt8192_infra_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8192_peri_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8192[] = {
+ {
+ .compatible = "mediatek,mt8192-apmixedsys",
+ .data = clk_mt8192_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt8192-topckgen",
+ .data = clk_mt8192_top_probe,
+ }, {
+ .compatible = "mediatek,mt8192-infracfg",
+ .data = clk_mt8192_infra_probe,
+ }, {
+ .compatible = "mediatek,mt8192-pericfg",
+ .data = clk_mt8192_peri_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt8192_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *pdev);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r)
+ dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt8192_drv = {
+ .probe = clk_mt8192_probe,
+ .driver = {
+ .name = "clk-mt8192",
+ .of_match_table = of_match_clk_mt8192,
+ },
+};
+
+static int __init clk_mt8192_init(void)
+{
+ return platform_driver_register(&clk_mt8192_drv);
+}
+
+arch_initcall(clk_mt8192_init);

diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index cec1c8a27211..4b6096c44d74 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/clkdev.h>
#include <linux/mfd/syscon.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include "clk-mtk.h"
#include "clk-gate.h"
@@ -106,7 +107,7 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
if (!clk_data)
return -ENOMEM;
- regmap = syscon_node_to_regmap(node);
+ regmap = device_node_to_regmap(node);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %ld\n", node,
PTR_ERR(regmap));
@@ -286,3 +287,25 @@ void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
clk_data->clks[mcd->id] = clk;
}
}
+
+int mtk_clk_simple_probe(struct platform_device *pdev)
+{
+ const struct mtk_clk_desc *mcd;
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ mcd = of_device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ return r;
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
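+
+A hedged sketch of how a per-SoC driver is expected to use the new mtk_clk_simple_probe() helper. The "mediatek,mt8192-imgsys" compatible and the img_clks[] gate table are stand-ins borrowed from this series, not code from this hunk:

static const struct mtk_clk_desc img_desc = {
	.clks = img_clks,			/* a plain mtk_gate table */
	.num_clks = ARRAY_SIZE(img_clks),
};

static const struct of_device_id of_match_clk_mt8192_img[] = {
	{ .compatible = "mediatek,mt8192-imgsys", .data = &img_desc },
	{ /* sentinel */ }
};

static struct platform_driver clk_mt8192_img_drv = {
	.probe = mtk_clk_simple_probe,	/* no per-driver probe body needed */
	.driver = {
		.name = "clk-mt8192-img",
		.of_match_table = of_match_clk_mt8192_img,
	},
};
builtin_platform_driver(clk_mt8192_img_drv);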
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c3d6756b0c7e..7de41c3b3206 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -10,6 +10,7 @@
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
struct clk;
struct clk_onecell_data;
@@ -213,13 +214,13 @@ struct mtk_pll_div_table {
struct mtk_pll_data {
int id;
const char *name;
- uint32_t reg;
- uint32_t pwr_reg;
- uint32_t en_mask;
- uint32_t pd_reg;
- uint32_t tuner_reg;
- uint32_t tuner_en_reg;
- uint8_t tuner_en_bit;
+ u32 reg;
+ u32 pwr_reg;
+ u32 en_mask;
+ u32 pd_reg;
+ u32 tuner_reg;
+ u32 tuner_en_reg;
+ u8 tuner_en_bit;
int pd_shift;
unsigned int flags;
const struct clk_ops *ops;
@@ -228,11 +229,13 @@ struct mtk_pll_data {
unsigned long fmax;
int pcwbits;
int pcwibits;
- uint32_t pcw_reg;
+ u32 pcw_reg;
int pcw_shift;
- uint32_t pcw_chg_reg;
+ u32 pcw_chg_reg;
const struct mtk_pll_div_table *div_table;
const char *parent_name;
+ u32 en_reg;
+ u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
};
void mtk_clk_register_plls(struct device_node *node,
@@ -248,4 +251,11 @@ void mtk_register_reset_controller(struct device_node *np,
void mtk_register_reset_controller_set_clr(struct device_node *np,
unsigned int num_regs, int regofs);
+struct mtk_clk_desc {
+ const struct mtk_gate *clks;
+ size_t num_clks;
+};
+
+int mtk_clk_simple_probe(struct platform_device *pdev);
+
#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index b0c61709bacc..855b0a1f7eb9 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -116,7 +116,12 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
-static const struct clk_ops mtk_mux_ops = {
+const struct clk_ops mtk_mux_clr_set_upd_ops = {
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.enable = mtk_clk_mux_enable_setclr,
.disable = mtk_clk_mux_disable_setclr,
.is_enabled = mtk_clk_mux_is_enabled,
@@ -140,7 +145,7 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
init.flags = mux->flags | CLK_SET_RATE_PARENT;
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
- init.ops = &mtk_mux_ops;
+ init.ops = mux->ops;
clk_mux->regmap = regmap;
clk_mux->data = mux;
@@ -165,7 +170,7 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
struct clk *clk;
int i;
- regmap = syscon_node_to_regmap(node);
+ regmap = device_node_to_regmap(node);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %ld\n", node,
PTR_ERR(regmap));
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index f1946161ade1..27841d649118 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -33,12 +33,13 @@ struct mtk_mux {
u8 gate_shift;
s8 upd_shift;
+ const struct clk_ops *ops;
signed char num_parents;
};
#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags) { \
+ _gate, _upd_ofs, _upd, _flags, _ops) { \
.id = _id, \
.name = _name, \
.mux_ofs = _mux_ofs, \
@@ -52,14 +53,19 @@ struct mtk_mux {
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = _flags, \
+ .ops = &_ops, \
}
+extern const struct clk_ops mtk_mux_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
_gate, _upd_ofs, _upd, _flags) \
GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags) \
+ _gate, _upd_ofs, _upd, _flags, \
+ mtk_mux_gate_clr_set_upd_ops)
#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -69,6 +75,14 @@ struct mtk_mux {
_width, _gate, _upd_ofs, _upd, \
CLK_SET_RATE_PARENT)
+#define MUX_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _upd_ofs, _upd) \
+ GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ 0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
+ mtk_mux_clr_set_upd_ops)
+
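+
+A hedged example of the new gate-less variant; every identifier and offset here is hypothetical, chosen only to illustrate the argument order:

static const struct mtk_mux top_muxes_example[] = {
	/* id, name, parents, mux/set/clr offsets, shift, width, upd offset, upd bit */
	MUX_CLR_SET_UPD(CLK_TOP_FOO_SEL, "foo_sel", foo_parents,
			0x0010, 0x0014, 0x0018, 0, 2, 0x0004, 7),
};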
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index f440f2cd0b69..7fb001a4e7d8 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -44,6 +44,7 @@ struct mtk_clk_pll {
void __iomem *tuner_en_addr;
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
+ void __iomem *en_addr;
const struct mtk_pll_data *data;
};
@@ -56,7 +57,7 @@ static int mtk_pll_is_prepared(struct clk_hw *hw)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
- return (readl(pll->base_addr + REG_CON0) & CON0_BASE_EN) != 0;
+ return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
@@ -238,6 +239,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 r;
+ u32 div_en_mask;
r = readl(pll->pwr_addr) | CON0_PWR_ON;
writel(r, pll->pwr_addr);
@@ -247,9 +249,14 @@ static int mtk_pll_prepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
udelay(1);
- r = readl(pll->base_addr + REG_CON0);
- r |= pll->data->en_mask;
- writel(r, pll->base_addr + REG_CON0);
+ r = readl(pll->en_addr) | BIT(pll->data->pll_en_bit);
+ writel(r, pll->en_addr);
+
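+ /* Bits left in en_mask besides CON0_BASE_EN are divider enables */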
+ div_en_mask = pll->data->en_mask & ~CON0_BASE_EN;
+ if (div_en_mask) {
+ r = readl(pll->base_addr + REG_CON0) | div_en_mask;
+ writel(r, pll->base_addr + REG_CON0);
+ }
__mtk_pll_tuner_enable(pll);
@@ -268,6 +275,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 r;
+ u32 div_en_mask;
if (pll->data->flags & HAVE_RST_BAR) {
r = readl(pll->base_addr + REG_CON0);
@@ -277,9 +285,14 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
__mtk_pll_tuner_disable(pll);
- r = readl(pll->base_addr + REG_CON0);
- r &= ~CON0_BASE_EN;
- writel(r, pll->base_addr + REG_CON0);
+ div_en_mask = pll->data->en_mask & ~CON0_BASE_EN;
+ if (div_en_mask) {
+ r = readl(pll->base_addr + REG_CON0) & ~div_en_mask;
+ writel(r, pll->base_addr + REG_CON0);
+ }
+
+ r = readl(pll->en_addr) & ~BIT(pll->data->pll_en_bit);
+ writel(r, pll->en_addr);
r = readl(pll->pwr_addr) | CON0_ISO_EN;
writel(r, pll->pwr_addr);
@@ -321,6 +334,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->tuner_addr = base + data->tuner_reg;
if (data->tuner_en_reg)
pll->tuner_en_addr = base + data->tuner_en_reg;
+ if (data->en_reg)
+ pll->en_addr = base + data->en_reg;
+ else
+ pll->en_addr = pll->base_addr + REG_CON0;
pll->hw.init = &init;
pll->data = data;
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index cb939c071b0c..e562dc3c10a4 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -98,7 +98,7 @@ static void mtk_register_reset_controller_common(struct device_node *np,
int ret;
struct regmap *regmap;
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %ld\n", np,
PTR_ERR(regmap));
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 47680237d0be..8bc893df4736 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
{ "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
11, 1, 0 },
+ { }
};
static struct clk *clk_muxing_get_src(
diff --git a/drivers/clk/pistachio/Kconfig b/drivers/clk/pistachio/Kconfig
new file mode 100644
index 000000000000..d00f7b4a25fc
--- /dev/null
+++ b/drivers/clk/pistachio/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config COMMON_CLK_PISTACHIO
+ bool "Support for IMG Pistachio SoC clock controllers"
+ depends on MIPS || COMPILE_TEST
+ help
+ Support for the IMG Pistachio SoC clock controller.
+ Say Y if you want to include clock support.
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 62e00e15495c..0a5596797b93 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -240,6 +240,14 @@ config MSM_MMCC_8960
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8953
+ tristate "MSM8953 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8953 devices.
+ Say Y if you want to use devices such as UART, SPI, I2C, USB,
+ SD/eMMC, display, graphics, camera, etc.
+
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
select QCOM_GDSC
@@ -257,6 +265,15 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_MMCC_8994
+ tristate "MSM8994 Multimedia Clock Controller"
+ select MSM_GCC_8994
+ select QCOM_GDSC
+ help
+ Support for the multimedia clock controller on msm8994 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
config MSM_GCC_8994
tristate "MSM8994 Global Clock Controller"
help
@@ -332,6 +349,15 @@ config SC_DISPCC_7180
Say Y if you want to support display devices and functionality such as
splash screen.
+config SC_DISPCC_7280
+ tristate "SC7280 Display Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ SC7280 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
@@ -376,6 +402,14 @@ config SC_GPUCC_7180
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_GPUCC_7280
+ tristate "SC7280 Graphics Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the graphics clock controller on SC7280 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_MSS_7180
tristate "SC7180 Modem Clock Controller"
select SC_GCC_7180
@@ -393,6 +427,14 @@ config SC_VIDEOCC_7180
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SC_VIDEOCC_7280
+ tristate "SC7280 Video Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the video clock controller on SC7280 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
select SDM_GCC_845
@@ -506,6 +548,13 @@ config SM_DISPCC_8250
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_GCC_6115
+ tristate "SM6115 and SM4250 Global Clock Controller"
+ help
+ Support for the global clock controller on SM6115 and SM4250 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, PCIe, etc.
+
config SM_GCC_6125
tristate "SM6125 Global Clock Controller"
help
@@ -513,6 +562,13 @@ config SM_GCC_6125
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_6350
+ tristate "SM6350 Global Clock Controller"
+ help
+ Support for the global clock controller on SM6350 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
help
@@ -554,7 +610,7 @@ config SM_GPUCC_8250
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
- select SDM_GCC_8150
+ select SM_GCC_8150
select QCOM_GDSC
help
Support for the video clock controller on SM8150 devices.
@@ -563,7 +619,7 @@ config SM_VIDEOCC_8150
config SM_VIDEOCC_8250
tristate "SM8250 Video Clock Controller"
- select SDM_GCC_8250
+ select SM_GCC_8250
select QCOM_GDSC
help
Support for the video clock controller on SM8250 devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index c2a1cafb31bc..9825ef843f4a 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
+obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
obj-$(CONFIG_MSM_GPUCC_8998) += gpucc-msm8998.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8994) += mmcc-msm8994.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
@@ -57,13 +59,16 @@ obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
+obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
+obj-$(CONFIG_SC_VIDEOCC_7280) += videocc-sc7280.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
@@ -76,7 +81,9 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
+obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
+obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index af6ac17c7dae..9e6decb9c26f 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -6,9 +6,11 @@
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/module.h>
@@ -34,9 +36,59 @@ static const struct regmap_config a53pll_regmap_config = {
.fast_io = true,
};
+static struct pll_freq_tbl *qcom_a53pll_get_freq_tbl(struct device *dev)
+{
+ struct pll_freq_tbl *freq_tbl;
+ unsigned long xo_freq;
+ unsigned long freq;
+ struct clk *xo_clk;
+ int count;
+ int ret;
+ int i;
+
+ xo_clk = devm_clk_get(dev, "xo");
+ if (IS_ERR(xo_clk))
+ return NULL;
+
+ xo_freq = clk_get_rate(xo_clk);
+ /* Guard the modulo below against a zero xo rate */
+ if (!xo_freq)
+ return NULL;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return NULL;
+
+ count = dev_pm_opp_get_opp_count(dev);
+ if (count <= 0)
+ return NULL;
+
+ freq_tbl = devm_kcalloc(dev, count + 1, sizeof(*freq_tbl), GFP_KERNEL);
+ if (!freq_tbl)
+ return NULL;
+
+ for (i = 0, freq = 0; i < count; i++, freq++) {
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ return NULL;
+
+ /* Skip rates that are not an integer multiple of the xo rate */
+ if (freq % xo_freq) {
+ dev_pm_opp_put(opp);
+ continue;
+ }
+
+ freq_tbl[i].freq = freq;
+ freq_tbl[i].l = freq / xo_freq;
+ freq_tbl[i].n = 1;
+
+ dev_pm_opp_put(opp);
+ }
+
+ return freq_tbl;
+}
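+
+A worked example of the table entries this builds, assuming the usual 19.2 MHz xo on these SoCs (the OPP rate is illustrative):

/*
 * An OPP of 998400000 Hz divides evenly by 19200000, so the loop emits:
 *   { .freq = 998400000, .l = 998400000 / 19200000 = 52, .n = 1 }
 * Rates that are not an integer multiple of the xo rate are skipped.
 */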
+
static int qcom_a53pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct regmap *regmap;
struct resource *res;
struct clk_pll *pll;
@@ -64,13 +116,22 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
pll->mode_reg = 0x00;
pll->status_reg = 0x1c;
pll->status_bit = 16;
- pll->freq_tbl = a53pll_freq;
- init.name = "a53pll";
+ pll->freq_tbl = qcom_a53pll_get_freq_tbl(dev);
+ if (!pll->freq_tbl) {
+ /* Fall back on a53pll_freq if no freq table could be built from OPP */
+ pll->freq_tbl = a53pll_freq;
+ }
+
+ /* Use a unique name by appending the @unit-address */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "a53pll%s",
+ strchrnul(np->full_name, '@'));
+ if (!init.name)
+ return -ENOMEM;
+
init.parent_names = (const char *[]){ "xo" };
init.num_parents = 1;
init.ops = &clk_pll_sr2_ops;
- init.flags = CLK_IS_CRITICAL;
pll->clkr.hw.init = &init;
ret = devm_clk_register_regmap(dev, &pll->clkr);
@@ -91,6 +152,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
static const struct of_device_id qcom_a53pll_match_table[] = {
{ .compatible = "qcom,msm8916-a53pll" },
+ { .compatible = "qcom,msm8939-a53pll" },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index cf69a97d0439..89e0730810ac 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -46,6 +46,7 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
+ struct device_node *np = parent->of_node;
struct clk_regmap_mux_div *a53cc;
struct regmap *regmap;
struct clk_init_data init = { };
@@ -61,11 +62,16 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
if (!a53cc)
return -ENOMEM;
- init.name = "a53mux";
+ /* Use a unique name by appending the parent's @unit-address */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "a53mux%s",
+ strchrnul(np->full_name, '@'));
+ if (!init.name)
+ return -ENOMEM;
+
init.parent_data = pdata;
init.num_parents = ARRAY_SIZE(pdata);
init.ops = &clk_regmap_mux_div_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT;
a53cc->clkr.hw.init = &init;
a53cc->clkr.regmap = regmap;
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index 9bcf2f8ed4de..ce73ee9037cb 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -1652,32 +1652,35 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pm_clk_create(&pdev->dev);
if (ret < 0)
return ret;
ret = pm_clk_add(&pdev->dev, "xo");
if (ret < 0) {
dev_err(&pdev->dev, "Failed to acquire XO clock\n");
- goto disable_pm_runtime;
+ return ret;
}
ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
dev_err(&pdev->dev, "Failed to acquire iface clock\n");
- goto disable_pm_runtime;
+ return ret;
}
ret = pm_runtime_get(&pdev->dev);
if (ret)
- goto destroy_pm_clk;
+ return ret;
regmap = qcom_cc_map(pdev, &cam_cc_sc7180_desc);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
pm_runtime_put(&pdev->dev);
- goto destroy_pm_clk;
+ return ret;
}
clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
@@ -1689,18 +1692,10 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
- goto destroy_pm_clk;
+ return ret;
}
return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
}
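A minimal sketch of the devres-managed PM pattern the conversion above relies on; the foo_cc_probe() name is hypothetical:

static int foo_cc_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Both helpers queue their undo actions on the devres list,
	 * so every error path below can simply return.
	 */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = devm_pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* pm_clk state added here is torn down automatically on failure */
	return pm_clk_add(&pdev->dev, "xo");
}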
static const struct dev_pm_ops cam_cc_pm_ops = {
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 552d1cbfea4c..441d7a20e6f3 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -536,6 +536,26 @@ static const struct clk_rpmh_desc clk_rpmh_sc7280 = {
.num_clks = ARRAY_SIZE(sc7280_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sm6350, ln_bb_clk2, ln_bb_clk2_ao, "lnbclkg2", 4);
+DEFINE_CLK_RPMH_VRM(sm6350, ln_bb_clk3, ln_bb_clk3_ao, "lnbclkg3", 4);
+DEFINE_CLK_RPMH_ARC(sm6350, qlink, qlink_ao, "qphy.lvl", 0x1, 4);
+
+static struct clk_hw *sm6350_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sm6350_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sm6350_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sm6350_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sm6350_ln_bb_clk3_ao.hw,
+ [RPMH_QLINK_CLK] = &sm6350_qlink.hw,
+ [RPMH_QLINK_CLK_A] = &sm6350_qlink_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm6350 = {
+ .clks = sm6350_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm6350_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -623,6 +643,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
+ { .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index b2c142f3a649..66d7807ee38e 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -913,10 +913,166 @@ static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
.num_clks = ARRAY_SIZE(sdm660_clks),
};
+static struct clk_smd_rpm *mdm9607_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_mdm9607 = {
+ .clks = mdm9607_clks,
+ .num_clks = ARRAY_SIZE(mdm9607_clks),
+};
+
+static struct clk_smd_rpm *msm8953_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_RF_CLK3] = &msm8992_ln_bb_clk,
+ [RPM_SMD_RF_CLK3_A] = &msm8992_ln_bb_a_clk,
+ [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8953 = {
+ .clks = msm8953_clks,
+ .num_clks = ARRAY_SIZE(msm8953_clks),
+};
+
+/* SM6125 */
+DEFINE_CLK_SMD_RPM(sm6125, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(sm6125, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM_BRANCH(sm6125, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
+DEFINE_CLK_SMD_RPM(sm6125, qup_clk, qup_a_clk, QCOM_SMD_RPM_QUP_CLK, 0);
+DEFINE_CLK_SMD_RPM(sm6125, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(sm6125, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMAXI_CLK, 1);
+DEFINE_CLK_SMD_RPM(sm6125, snoc_periph_clk, snoc_periph_a_clk,
+ QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(sm6125, snoc_lpass_clk, snoc_lpass_a_clk,
+ QCOM_SMD_RPM_BUS_CLK, 5);
+
+static struct clk_smd_rpm *sm6125_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_LN_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_LN_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3,
+ [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sm6125 = {
+ .clks = sm6125_clks,
+ .num_clks = ARRAY_SIZE(sm6125_clks),
+};
+
+/* SM6115 */
+static struct clk_smd_rpm *sm6115_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
+ .clks = sm6115_clks,
+ .num_clks = ARRAY_SIZE(sm6115_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-mdm9607", .data = &rpm_clk_mdm9607 },
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
+ { .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
{ .compatible = "qcom,rpmcc-msm8992", .data = &rpm_clk_msm8992 },
@@ -925,6 +1081,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
+ { .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
+ { .compatible = "qcom,rpmcc-sm6125", .data = &rpm_clk_sm6125 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
new file mode 100644
index 000000000000..4ef4ae231794
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_EVEN,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_EDP_PHY_PLL_LINK_CLK,
+ P_EDP_PHY_PLL_VCO_DIV_CLK,
+ P_GCC_DISP_GPLL0_CLK,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 1520 MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x4F,
+ .alpha = 0x2AAA,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dp_phy_pll_link_clk" },
+ { .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_EDP_PHY_PLL_LINK_CLK, 1 },
+ { P_EDP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "edp_phy_pll_link_clk" },
+ { .fw_name = "edp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+ { P_DISP_CC_PLL0_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .fw_name = "gcc_disp_gpll0_clk" },
+ { .hw = &disp_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "gcc_disp_gpll0_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GCC_DISP_GPLL0_CLK, 16, 0, 0),
+ F(75000000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
+ { }
+};
+
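+
+A quick sanity check of the rows above, assuming the customary 600 MHz GPLL0 feeding gcc_disp_gpll0_clk on this platform:

/*
 * F(37500000, P_GCC_DISP_GPLL0_CLK, 16, 0, 0): 600000000 / 16 = 37500000
 * F(75000000, P_GCC_DISP_GPLL0_CLK,  8, 0, 0): 600000000 /  8 = 75000000
 * (the third F() argument is the RCG pre-divider; M/N are unused here)
 */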
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x1170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x10d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x1158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x1128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x110c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x1140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_edp_aux_clk_src = {
+ .cmd_rcgr = 0x11d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_edp_link_clk_src = {
+ .cmd_rcgr = 0x11a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_edp_pixel_clk_src = {
+ .cmd_rcgr = 0x1188,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x10f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(200000000, P_GCC_DISP_GPLL0_CLK, 3, 0, 0),
+ F(300000000, P_GCC_DISP_GPLL0_CLK, 2, 0, 0),
+ F(380000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(506666667, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(608000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x1090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x1078,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x10a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x10c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x10f0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x1124,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_edp_link_div_clk_src = {
+ .reg = 0x11b8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_edp_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_edp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x1050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x1030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x104c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x104c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x1044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x103c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x1040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x1048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_edp_aux_clk = {
+ .halt_reg = 0x1060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_edp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_edp_link_clk = {
+ .halt_reg = 0x1058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_link_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_edp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_edp_link_intf_clk = {
+ .halt_reg = 0x105c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x105c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_edp_link_div_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_edp_pixel_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_edp_pixel_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_edp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x1038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x1014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x1024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x1010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x102c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
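+/*
+ * GDSC for the MDSS core. HW_CTRL lets the display hardware toggle the
+ * switch autonomously; RETAIN_FF_ENABLE keeps register state across
+ * power collapse.
+ */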
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x1004,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
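+/* Indexed by the DISP_CC_* IDs from the dt-bindings header. */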
+static struct clk_regmap *disp_cc_sc7280_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+ &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_EDP_AUX_CLK] = &disp_cc_mdss_edp_aux_clk.clkr,
+ [DISP_CC_MDSS_EDP_AUX_CLK_SRC] = &disp_cc_mdss_edp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_EDP_LINK_CLK] = &disp_cc_mdss_edp_link_clk.clkr,
+ [DISP_CC_MDSS_EDP_LINK_CLK_SRC] = &disp_cc_mdss_edp_link_clk_src.clkr,
+ [DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC] =
+ &disp_cc_mdss_edp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_EDP_LINK_INTF_CLK] = &disp_cc_mdss_edp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_EDP_PIXEL_CLK] = &disp_cc_mdss_edp_pixel_clk.clkr,
+ [DISP_CC_MDSS_EDP_PIXEL_CLK_SRC] = &disp_cc_mdss_edp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *disp_cc_sc7280_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+};
+
+static const struct regmap_config disp_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sc7280_desc = {
+ .config = &disp_cc_sc7280_regmap_config,
+ .clks = disp_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sc7280_clocks),
+ .gdscs = disp_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id disp_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sc7280_match_table);
+
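+/*
+ * Probe maps the register space, programs DISP_CC PLL0 with its fixed
+ * configuration, forces the XO branch on and then registers the clocks
+ * and GDSC with the framework.
+ */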
+static int disp_cc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /*
+ * Keep the clocks always-ON
+ * DISP_CC_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x5008, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sc7280_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sc7280_driver = {
+ .probe = disp_cc_sc7280_probe,
+ .driver = {
+ .name = "disp_cc-sc7280",
+ .of_match_table = disp_cc_sc7280_match_table,
+ },
+};
+
+static int __init disp_cc_sc7280_init(void)
+{
+ return platform_driver_register(&disp_cc_sc7280_driver);
+}
+subsys_initcall(disp_cc_sc7280_init);
+
+static void __exit disp_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sc7280_driver);
+}
+module_exit(disp_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC sc7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 601c7c0ba483..bf9ffe1a1cf4 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -26,6 +26,10 @@ enum {
P_DISP_CC_PLL1_OUT_MAIN,
P_DP_PHY_PLL_LINK_CLK,
P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DPTX1_PHY_PLL_LINK_CLK,
+ P_DPTX1_PHY_PLL_VCO_DIV_CLK,
+ P_DPTX2_PHY_PLL_LINK_CLK,
+ P_DPTX2_PHY_PLL_VCO_DIV_CLK,
P_EDP_PHY_PLL_LINK_CLK,
P_EDP_PHY_PLL_VCO_DIV_CLK,
P_DSI0_PHY_PLL_OUT_BYTECLK,
@@ -98,12 +102,20 @@ static const struct parent_map disp_cc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_DP_PHY_PLL_LINK_CLK, 1 },
{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DPTX1_PHY_PLL_LINK_CLK, 3 },
+ { P_DPTX1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DPTX2_PHY_PLL_LINK_CLK, 5 },
+ { P_DPTX2_PHY_PLL_VCO_DIV_CLK, 6 },
};

static const struct clk_parent_data disp_cc_parent_data_0[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "dp_phy_pll_link_clk" },
{ .fw_name = "dp_phy_pll_vco_div_clk" },
+ { .fw_name = "dptx1_phy_pll_link_clk" },
+ { .fw_name = "dptx1_phy_pll_vco_div_clk" },
+ { .fw_name = "dptx2_phy_pll_link_clk" },
+ { .fw_name = "dptx2_phy_pll_vco_div_clk" },
};

static const struct parent_map disp_cc_parent_map_1[] = {
@@ -269,20 +281,11 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
},
};

-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link1_clk_src[] = {
- F(162000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dp_link1_clk_src = {
.cmd_rcgr = 0x220c,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_link1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_link1_clk_src",
.parent_data = disp_cc_parent_data_0,
@@ -296,7 +299,6 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_link1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_link_clk_src",
.parent_data = disp_cc_parent_data_0,
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
new file mode 100644
index 000000000000..49513f1366ff
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -0,0 +1,4250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8953.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_SLEEP_CLK,
+ P_GPLL0,
+ P_GPLL0_DIV2,
+ P_GPLL2,
+ P_GPLL3,
+ P_GPLL4,
+ P_GPLL6,
+ P_GPLL6_DIV2,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
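+/* Fixed divide-by-two tap on the early GPLL0 output. */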
+static struct clk_fixed_factor gpll0_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_early_div",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2_early = {
+ .offset = 0x4a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x4a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll2_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct pll_vco gpll3_p_vco[] = {
+ { 1000000000, 2000000000, 0 },
+};
+
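+/*
+ * Initial GPLL3 configuration: L = 63 gives a 1209.6 MHz VCO from the
+ * 19.2 MHz XO, divided down to 604.8 MHz by the /2 post-divider. The
+ * GPU retunes this PLL at runtime (see gfx3d_clk_src below).
+ */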
+static const struct alpha_pll_config gpll3_early_config = {
+ .l = 63,
+ .config_ctl_val = 0x4001055b,
+ .early_output_mask = 0,
+ .post_div_mask = GENMASK(11, 8),
+ .post_div_val = BIT(8),
+};
+
+static struct clk_alpha_pll gpll3_early = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpll3_p_vco,
+ .num_vco = ARRAY_SIZE(gpll3_p_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll3_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll6_early = {
+ .offset = 0x37000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_early",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll6_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_early_div",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll6 = {
+ .offset = 0x37000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6_early.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
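+/*
+ * Each parent_map entry pairs a logical parent (P_*) with the selector
+ * value programmed into the RCG source mux; the same parent can sit at
+ * a different selector on different RCGs.
+ */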
+static const struct parent_map gcc_xo_gpll0_gpll0div2_2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 2 },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0div2_4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0div2_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct parent_map gcc_apc_droop_detector_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+};
+
+static const struct clk_parent_data gcc_apc_droop_detector_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
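+/*
+ * Frequency tables: each row is F(rate, source, pre_div, m, n). pre_div
+ * may be a half-integer (e.g. 2.5) because the hardware divider field
+ * encodes 2 * div - 1.
+ */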
+static const struct freq_tbl ftbl_apc_droop_detector_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(576000000, P_GPLL4, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apc0_droop_detector_clk_src = {
+ .cmd_rcgr = 0x78008,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apc_droop_detector_clk_src,
+ .parent_map = gcc_apc_droop_detector_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apc0_droop_detector_clk_src",
+ .parent_data = gcc_apc_droop_detector_data,
+ .num_parents = ARRAY_SIZE(gcc_apc_droop_detector_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 apc1_droop_detector_clk_src = {
+ .cmd_rcgr = 0x79008,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apc_droop_detector_clk_src,
+ .parent_map = gcc_apc_droop_detector_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apc1_droop_detector_clk_src",
+ .parent_data = gcc_apc_droop_detector_data,
+ .num_parents = ARRAY_SIZE(gcc_apc_droop_detector_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c00c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0d000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(12500000, P_GPLL0_DIV2, 16, 1, 2),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0d014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0f024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+ F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+ F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+ F(16000000, P_GPLL0_DIV2, 5, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ F(64000000, P_GPLL0, 1, 2, 25),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x0c044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0d034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
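+/*
+ * byte0 and byte1 mirror the DSI PLL selector values so either DSI PLL
+ * can drive either byte clock.
+ */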
+static const struct parent_map gcc_byte0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 3 },
+};
+
+static const struct parent_map gcc_byte1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_byte_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_byte0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "byte0_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_byte1_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "byte1_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct parent_map gcc_gp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_gp_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .fw_name = "sleep", .name = "sleep" },
+};
+
+static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_gp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_gp_data,
+ .num_parents = ARRAY_SIZE(gcc_gp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_gp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_gp_data,
+ .num_parents = ARRAY_SIZE(gcc_gp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0_DIV2, 10, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_cci_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 2 },
+ { P_GPLL0_DIV2, 3 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_cci_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .fw_name = "sleep", .name = "sleep" },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_DIV2, 1, 3, 32),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_cci_clk_src,
+ .parent_map = gcc_cci_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cci_clk_src",
+ .parent_data = gcc_cci_data,
+ .num_parents = ARRAY_SIZE(gcc_cci_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_cpp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 3 },
+ { P_GPLL2, 4 },
+ { P_GPLL0_DIV2, 5 },
+};
+
+static const struct clk_parent_data gcc_cpp_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .parent_map = gcc_cpp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cpp_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(40000000, P_GPLL0_DIV2, 10, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_csi0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 4 },
+ { P_GPLL0_DIV2, 5 },
+};
+
+static const struct parent_map gcc_csi12_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 5 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_csi_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_csi0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0_clk_src",
+ .parent_data = gcc_csi_data,
+ .num_parents = ARRAY_SIZE(gcc_csi_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_csi12_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1_clk_src",
+ .parent_data = gcc_csi_data,
+ .num_parents = ARRAY_SIZE(gcc_csi_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3c020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_csi12_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi2_clk_src",
+ .parent_data = gcc_csi_data,
+ .num_parents = ARRAY_SIZE(gcc_csi_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_csip_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 3 },
+ { P_GPLL2, 4 },
+ { P_GPLL0_DIV2, 5 },
+};
+
+static const struct clk_parent_data gcc_csip_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_csi_p_clk_src[] = {
+ F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0p_clk_src = {
+ .cmd_rcgr = 0x58084,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_p_clk_src,
+ .parent_map = gcc_csip_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0p_clk_src",
+ .parent_data = gcc_csip_data,
+ .num_parents = ARRAY_SIZE(gcc_csip_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1p_clk_src = {
+ .cmd_rcgr = 0x58094,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_p_clk_src,
+ .parent_map = gcc_csip_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1p_clk_src",
+ .parent_data = gcc_csip_data,
+ .num_parents = ARRAY_SIZE(gcc_csip_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi2p_clk_src = {
+ .cmd_rcgr = 0x580a4,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_p_clk_src,
+ .parent_map = gcc_csip_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi2p_clk_src",
+ .parent_data = gcc_csip_data,
+ .num_parents = ARRAY_SIZE(gcc_csip_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x4f05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi2phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_esc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 3 },
+};
+
+static const struct clk_parent_data gcc_esc_vsync_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_esc0_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_esc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "esc0_clk_src",
+ .parent_data = gcc_esc_vsync_data,
+ .num_parents = ARRAY_SIZE(gcc_esc_vsync_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_esc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "esc1_clk_src",
+ .parent_data = gcc_esc_vsync_data,
+ .num_parents = ARRAY_SIZE(gcc_esc_vsync_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_gfx3d_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL3, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL4, 4 },
+ { P_GPLL0_DIV2, 5 },
+ { P_GPLL6_DIV2, 6 },
+};
+
+static const struct clk_parent_data gcc_gfx3d_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .hw = &gpll6_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(133330000, P_GPLL0_DIV2, 3, 0, 0),
+ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+ F(200000000, P_GPLL0_DIV2, 2, 0, 0),
+ F(266670000, P_GPLL0, 3.0, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(460800000, P_GPLL4, 2.5, 0, 0),
+ F(510000000, P_GPLL3, 2, 0, 0),
+ F(560000000, P_GPLL3, 2, 0, 0),
+ F(600000000, P_GPLL3, 2, 0, 0),
+ F(650000000, P_GPLL3, 2, 0, 0),
+ F(685000000, P_GPLL3, 2, 0, 0),
+ F(725000000, P_GPLL3, 2, 0, 0),
+ { }
+};
+
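+/*
+ * Rates above 460.8 MHz all use GPLL3 with a fixed /2 divider, so rate
+ * changes in that range retune GPLL3 itself via CLK_SET_RATE_PARENT.
+ */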
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gcc_gfx3d_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_gfx3d_data,
+ .num_parents = ARRAY_SIZE(gcc_gfx3d_data),
+ .ops = &clk_rcg2_floor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_gp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp1_clk_src",
+ .parent_data = gcc_gp_data,
+ .num_parents = ARRAY_SIZE(gcc_gp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_gp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp2_clk_src",
+ .parent_data = gcc_gp_data,
+ .num_parents = ARRAY_SIZE(gcc_gp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_gp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp3_clk_src",
+ .parent_data = gcc_gp_data,
+ .num_parents = ARRAY_SIZE(gcc_gp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_jpeg0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+ { P_GPLL2, 5 },
+};
+
+static const struct clk_parent_data gcc_jpeg0_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .parent_map = gcc_jpeg0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_jpeg0_data,
+ .num_parents = ARRAY_SIZE(gcc_jpeg0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mclk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+ { P_GPLL6_DIV2, 5 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_mclk_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .hw = &gpll6_early_div.hw },
+ { .fw_name = "sleep", .name = "sleep" },
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(19200000, P_GPLL6, 5, 4, 45),
+ F(24000000, P_GPLL6_DIV2, 1, 2, 45),
+ F(26000000, P_GPLL0, 1, 4, 123),
+ F(33330000, P_GPLL0_DIV2, 12, 0, 0),
+ F(36610000, P_GPLL6, 1, 2, 59),
+ F(66667000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk2_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x5e000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk3_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mdp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_mdp_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .parent_map = gcc_mdp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mdp_clk_src",
+ .parent_data = gcc_mdp_data,
+ .num_parents = ARRAY_SIZE(gcc_mdp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
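+/* pclk0/pclk1 mirror the DSI PLL selectors, as the byte clocks do above. */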
+static const struct parent_map gcc_pclk0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 3 },
+};
+
+static const struct parent_map gcc_pclk1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_pclk_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk1_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(32000000, P_GPLL0_DIV2, 12.5, 0, 0),
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_rbcpr_gfx_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_gfx_clk_src = {
+ .cmd_rcgr = 0x3a00c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_rbcpr_gfx_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_4_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "rbcpr_gfx_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_sdcc1_ice_core_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_sdcc1_ice_core_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(270000000, P_GPLL6, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .parent_map = gcc_sdcc1_ice_core_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_sdcc1_ice_core_data,
+ .num_parents = ARRAY_SIZE(gcc_sdcc1_ice_core_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_sdcc_apps_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_sdcc_apss_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_DIV2, 5, 1, 4),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(192000000, P_GPLL4, 6, 0, 0),
+ F(384000000, P_GPLL4, 3, 0, 0),
+ { }
+};
+
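+/*
+ * SD/eMMC cards must not be clocked faster than requested, so the apps
+ * clocks use clk_rcg2_floor_ops to round rates down.
+ */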
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .parent_map = gcc_sdcc_apps_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_sdcc_apss_data,
+ .num_parents = ARRAY_SIZE(gcc_sdcc_apss_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_DIV2, 5, 1, 4),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(192000000, P_GPLL4, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .parent_map = gcc_sdcc_apps_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_sdcc_apss_data,
+ .num_parents = ARRAY_SIZE(gcc_sdcc_apss_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x3f00c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .parent_map = gcc_xo_gpll0_gpll0div2_2_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb30_master_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0div2_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0div2_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_usb30_mock_utmi_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL6_DIV2, 2 },
+ { P_GPLL0, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_usb30_mock_utmi_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll6_early_div.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL6_DIV2, 9, 1, 1),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f020,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .parent_map = gcc_usb30_mock_utmi_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_data = gcc_usb30_mock_utmi_data,
+ .num_parents = ARRAY_SIZE(gcc_usb30_mock_utmi_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_usb3_aux_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_usb3_aux_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep", .name = "sleep" },
+};
+
+static const struct freq_tbl ftbl_usb3_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_aux_clk_src = {
+ .cmd_rcgr = 0x3f05c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_usb3_aux_clk_src,
+ .parent_map = gcc_usb3_aux_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb3_aux_clk_src",
+ .parent_data = gcc_usb3_aux_data,
+ .num_parents = ARRAY_SIZE(gcc_usb3_aux_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_vcodec0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL2, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static const struct clk_parent_data gcc_vcodec0_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(114290000, P_GPLL0_DIV2, 3.5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ F(540000000, P_GPLL6, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .parent_map = gcc_vcodec0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_vcodec0_data,
+ .num_parents = ARRAY_SIZE(gcc_vcodec0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_vfe_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL4, 3 },
+ { P_GPLL2, 4 },
+ { P_GPLL0_DIV2, 5 },
+};
+
+static const struct clk_parent_data gcc_vfe_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct freq_tbl ftbl_vfe_clk_src[] = {
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .parent_map = gcc_vfe_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_vfe_data,
+ .num_parents = ARRAY_SIZE(gcc_vfe_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x58054,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .parent_map = gcc_vfe_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe1_clk_src",
+ .parent_data = gcc_vfe_data,
+ .num_parents = ARRAY_SIZE(gcc_vfe_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_vsync_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 2 },
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .parent_map = gcc_vsync_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vsync_clk_src",
+ .parent_data = gcc_esc_vsync_data,
+ .num_parents = ARRAY_SIZE(gcc_esc_vsync_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_branch gcc_apc0_droop_detector_gpll0_clk = {
+ .halt_reg = 0x78004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x78004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apc0_droop_detector_gpll0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &apc0_droop_detector_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_apc1_droop_detector_gpll0_clk = {
+ .halt_reg = 0x79004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apc1_droop_detector_gpll0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &apc1_droop_detector_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x4601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &apss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+ .halt_reg = 0x46020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_apss_tcu_async_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_tcu_async_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x59030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gpu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x0b008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0d010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0f020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0f020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0d00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0f01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x0c03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0d02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0d02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x56004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cpp_axi_clk = {
+ .halt_reg = 0x58064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2_ahb_clk = {
+ .halt_reg = 0x3c040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2_clk = {
+ .halt_reg = 0x3c03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_csiphy_3p_clk = {
+ .halt_reg = 0x58090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_csiphy_3p_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0p_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_csiphy_3p_clk = {
+ .halt_reg = 0x580a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x580a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_csiphy_3p_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1p_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2_csiphy_3p_clk = {
+ .halt_reg = 0x580b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x580b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_csiphy_3p_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2p_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2phy_clk = {
+ .halt_reg = 0x3c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+ .halt_reg = 0x4f068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2pix_clk = {
+ .halt_reg = 0x3c058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi2rdi_clk = {
+ .halt_reg = 0x3c050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe1_clk = {
+ .halt_reg = 0x58074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x5c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5e018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5e018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_ahb_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_axi_clk = {
+ .halt_reg = 0x58048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_ahb_clk = {
+ .halt_reg = 0x58060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_axi_clk = {
+ .halt_reg = 0x58068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe1_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_dcc_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_jpeg_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x59044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_pcnoc_usb3_axi_clk = {
+ .halt_reg = 0x3f038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pcnoc_usb3_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_qusb_ref_clk = {
+ .halt_reg = 0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_qusb_ref_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_rbcpr_gfx_clk = {
+ .halt_reg = 0x3a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_rbcpr_gfx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &rbcpr_gfx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_smmu_cfg_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x3f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb30_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb3_aux_clk = {
+ .halt_reg = 0x3f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb3_pipe_clk = {
+ .halt_reg = 0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb3_pipe_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb_clk = {
+ .halt_reg = 0x3f080,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_phy_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_ss_ref_clk = {
+ .halt_reg = 0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3f07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_ss_ref_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe1_tbu_clk = {
+ .halt_reg = 0x12090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe_tbu_clk",
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
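+/*
+ * GDSC power domains. cxcs lists the branch CBCR registers whose
+ * memory-retention bits the common qcom gdsc code forces on while the
+ * domain is powered.
+ */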
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x3f078,
+ .pd = {
+ .name = "usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ /*
+ * FIXME: dwc3 usb gadget cannot resume after GDSC power off
+ * dwc3 7000000.dwc3: failed to enable ep0out
+ */
+ .flags = ALWAYS_ON,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []){ 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []){ 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []){ 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .cxcs = (unsigned int []){ 0x57020, 0x57028 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "jpeg_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []){ 0x58038, 0x58048, 0x5600c, 0x58050 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x5806c,
+ .cxcs = (unsigned int []){ 0x5805c, 0x58068, 0x5600c, 0x58074 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gx_gdsc = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000, 0x59024 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x5904c,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x58078,
+ .cxcs = (unsigned int []){ 0x5803c, 0x58064 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "cpp_gdsc",
+ },
+ .flags = ALWAYS_ON,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
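+/* clk_hw-only clocks (the gpll0/gpll6 early dividers) with no regmap state. */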
+static struct clk_hw *gcc_msm8953_hws[] = {
+ &gpll0_early_div.hw,
+ &gpll6_early_div.hw,
+};
+
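+/* Regmap-backed clocks, indexed by the ids from the gcc-msm8953 dt-bindings header. */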
+static struct clk_regmap *gcc_msm8953_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_EARLY] = &gpll2_early.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_APSS_TCU_ASYNC_CLK] = &gcc_apss_tcu_async_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [APC0_DROOP_DETECTOR_CLK_SRC] = &apc0_droop_detector_clk_src.clkr,
+ [APC1_DROOP_DETECTOR_CLK_SRC] = &apc1_droop_detector_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CSI0P_CLK_SRC] = &csi0p_clk_src.clkr,
+ [CSI1P_CLK_SRC] = &csi1p_clk_src.clkr,
+ [CSI2P_CLK_SRC] = &csi2p_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [RBCPR_GFX_CLK_SRC] = &rbcpr_gfx_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_AUX_CLK_SRC] = &usb3_aux_clk_src.clkr,
+ [GCC_APC0_DROOP_DETECTOR_GPLL0_CLK] = &gcc_apc0_droop_detector_gpll0_clk.clkr,
+ [GCC_APC1_DROOP_DETECTOR_GPLL0_CLK] = &gcc_apc1_droop_detector_gpll0_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AXI_CLK] = &gcc_camss_cpp_axi_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0_CSIPHY_3P_CLK] = &gcc_camss_csi0_csiphy_3p_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1_CSIPHY_3P_CLK] = &gcc_camss_csi1_csiphy_3p_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI2_CSIPHY_3P_CLK] = &gcc_camss_csi2_csiphy_3p_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_PCNOC_USB3_AXI_CLK] = &gcc_pcnoc_usb3_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_RBCPR_GFX_CLK] = &gcc_rbcpr_gfx_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_AUX_CLK] = &gcc_usb3_aux_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB_CLK] = &gcc_usb_phy_cfg_ahb_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_QUSB_REF_CLK] = &gcc_qusb_ref_clk.clkr,
+ [GCC_USB_SS_REF_CLK] = &gcc_usb_ss_ref_clk.clkr,
+ [GCC_USB3_PIPE_CLK] = &gcc_usb3_pipe_clk.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+};
+
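+/*
+ * Block resets (BCRs); with no bit specified, a qcom_reset_map entry
+ * defaults to asserting bit 0 of the given register.
+ */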
+static const struct qcom_reset_map gcc_msm8953_resets[] = {
+ [GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+ [GCC_MSS_BCR] = { 0x71000 },
+ [GCC_QUSB2_PHY_BCR] = { 0x4103c },
+ [GCC_USB3PHY_PHY_BCR] = { 0x3f03c },
+ [GCC_USB3_PHY_BCR] = { 0x3f034 },
+ [GCC_USB_30_BCR] = { 0x3f070 },
+};
+
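+/* All GCC registers are 32-bit and word-aligned within a 0x80000 window. */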
+static const struct regmap_config gcc_msm8953_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
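+/* Globally distributed switch controllers (GDSCs): power domains managed by this block. */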
+static struct gdsc *gcc_msm8953_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
+static const struct qcom_cc_desc gcc_msm8953_desc = {
+ .config = &gcc_msm8953_regmap_config,
+ .clks = gcc_msm8953_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8953_clocks),
+ .resets = gcc_msm8953_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8953_resets),
+ .gdscs = gcc_msm8953_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8953_gdscs),
+ .clk_hws = gcc_msm8953_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_msm8953_hws),
+};
+
+static int gcc_msm8953_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8953_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
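+ /* Program GPLL3's initial configuration before the clocks are registered. */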
+ clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8953_desc, regmap);
+}
+
+static const struct of_device_id gcc_msm8953_match_table[] = {
+ { .compatible = "qcom,gcc-msm8953" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8953_match_table);
+
+static struct platform_driver gcc_msm8953_driver = {
+ .probe = gcc_msm8953_probe,
+ .driver = {
+ .name = "gcc-msm8953",
+ .of_match_table = gcc_msm8953_match_table,
+ },
+};
+
+static int __init gcc_msm8953_init(void)
+{
+ return platform_driver_register(&gcc_msm8953_driver);
+}
+core_initcall(gcc_msm8953_init);
+
+static void __exit gcc_msm8953_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8953_driver);
+}
+module_exit(gcc_msm8953_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8953 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 6394257ca8c0..4d36f96e9ae2 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -37,114 +37,14 @@ enum {
P_GPLL1_EARLY_DIV,
};
-static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL0_EARLY_DIV, 6 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = {
- "xo",
- "gpll0",
- "gpll0_early_div",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll0[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0[] = {
- "xo",
- "gpll0",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_SLEEP_CLK, 5 },
- { P_GPLL0_EARLY_DIV, 6 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = {
- "xo",
- "gpll0",
- "sleep_clk",
- "gpll0_early_div",
-};
-
-static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
- { P_XO, 0 },
- { P_SLEEP_CLK, 5 },
-};
-
-static const char * const gcc_parent_names_xo_sleep_clk[] = {
- "xo",
- "sleep_clk",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll4[] = {
- { P_XO, 0 },
- { P_GPLL4, 5 },
-};
-
-static const char * const gcc_parent_names_xo_gpll4[] = {
- "xo",
- "gpll4",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL0_EARLY_DIV, 3 },
- { P_GPLL1, 4 },
- { P_GPLL4, 5 },
- { P_GPLL1_EARLY_DIV, 6 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
- "xo",
- "gpll0",
- "gpll0_early_div",
- "gpll1",
- "gpll4",
- "gpll1_early_div",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL4, 5 },
- { P_GPLL0_EARLY_DIV, 6 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = {
- "xo",
- "gpll0",
- "gpll4",
- "gpll0_early_div",
-};
-
-static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL0_EARLY_DIV, 2 },
- { P_GPLL4, 5 },
-};
-
-static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = {
- "xo",
- "gpll0",
- "gpll0_early_div",
- "gpll4",
-};
-
static struct clk_fixed_factor xo = {
.mult = 1,
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "xo",
- .parent_names = (const char *[]){ "xo_board" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo"
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -158,7 +58,9 @@ static struct clk_alpha_pll gpll0_early = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -170,7 +72,9 @@ static struct clk_fixed_factor gpll0_early_div = {
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "gpll0_early_div",
- .parent_names = (const char *[]){ "gpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -181,7 +85,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]){ "gpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
@@ -195,7 +101,9 @@ static struct clk_alpha_pll gpll1_early = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -207,7 +115,9 @@ static struct clk_fixed_factor gpll1_early_div = {
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "gpll1_early_div",
- .parent_names = (const char *[]){ "gpll1_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll1_early.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -218,7 +128,9 @@ static struct clk_alpha_pll_postdiv gpll1 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1",
- .parent_names = (const char *[]){ "gpll1_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll1_early.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
@@ -232,7 +144,9 @@ static struct clk_alpha_pll gpll4_early = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gpll4_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -245,12 +159,116 @@ static struct clk_alpha_pll_postdiv gpll4 = {
.clkr.hw.init = &(struct clk_init_data)
{
.name = "gpll4",
- .parent_names = (const char *[]) { "gpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
};
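+
+/*
+ * The parent tables follow the PLL definitions so that the new
+ * clk_parent_data entries can take the clk_hw pointers directly.
+ */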
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_sleep_clk[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll4[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 3 },
+ { P_GPLL1, 4 },
+ { P_GPLL4, 5 },
+ { P_GPLL1_EARLY_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .hw = &gpll1.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll1_early_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 2 },
+ { P_GPLL4, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_early_div.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -265,7 +283,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -290,7 +308,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -304,7 +322,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -318,7 +336,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -332,7 +350,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -346,7 +364,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -360,7 +378,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -374,7 +392,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -407,7 +425,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -421,7 +439,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -435,7 +453,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -449,7 +467,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -463,7 +481,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -477,7 +495,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -491,7 +509,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -505,7 +523,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -519,7 +537,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -533,7 +551,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -547,7 +565,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -561,7 +579,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -582,7 +600,7 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -596,7 +614,7 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -610,7 +628,7 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -630,7 +648,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
.freq_tbl = ftbl_hmss_gpll0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll0_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -651,7 +669,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
.freq_tbl = ftbl_hmss_gpll4_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll4_clk_src",
- .parent_names = gcc_parent_names_xo_gpll4,
+ .parent_data = gcc_parent_data_xo_gpll4,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -670,7 +688,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0,
+ .parent_data = gcc_parent_data_xo_gpll0,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -689,7 +707,7 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_pdm2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -711,7 +729,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
.freq_tbl = ftbl_qspi_ser_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
.num_parents = 6,
.ops = &clk_rcg2_ops,
},
@@ -737,7 +755,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.freq_tbl = ftbl_sdcc1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -759,7 +777,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.freq_tbl = ftbl_sdcc1_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -785,7 +803,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_sdcc2_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
.num_parents = 4,
.ops = &clk_rcg2_floor_ops,
},
@@ -808,7 +826,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.freq_tbl = ftbl_ufs_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -829,7 +847,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
.freq_tbl = ftbl_ufs_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -843,7 +861,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_phy_aux_clk_src",
- .parent_names = gcc_parent_names_xo_sleep_clk,
+ .parent_data = gcc_parent_data_xo_sleep_clk,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -864,7 +882,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
.freq_tbl = ftbl_ufs_unipro_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_unipro_core_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -885,7 +903,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
.freq_tbl = ftbl_usb20_master_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_master_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -905,7 +923,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
.freq_tbl = ftbl_usb20_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -930,7 +948,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
.freq_tbl = ftbl_usb30_master_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -951,7 +969,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
@@ -971,7 +989,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.freq_tbl = ftbl_usb3_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = gcc_parent_names_xo_sleep_clk,
+ .parent_data = gcc_parent_data_xo_sleep_clk,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -985,8 +1003,8 @@ static struct clk_branch gcc_aggre2_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre2_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1002,8 +1020,8 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre2_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1071,8 +1089,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1089,8 +1107,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1107,8 +1125,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1125,8 +1143,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1143,8 +1161,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1161,8 +1179,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1179,8 +1197,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1197,8 +1215,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1215,8 +1233,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1233,8 +1251,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1264,8 +1282,8 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1282,8 +1300,8 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1300,8 +1318,8 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1318,8 +1336,8 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1336,8 +1354,8 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1354,8 +1372,8 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1372,8 +1390,8 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1390,8 +1408,8 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1408,8 +1426,8 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1426,8 +1444,8 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp2_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1457,8 +1475,8 @@ static struct clk_branch gcc_cfg_noc_usb2_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb2_axi_clk",
- .parent_names = (const char *[]){
- "usb20_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb20_master_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1474,8 +1492,8 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1503,8 +1521,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1521,8 +1539,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1539,8 +1557,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1584,8 +1602,8 @@ static struct clk_branch gcc_gpu_gpll0_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_clk",
- .parent_names = (const char *[]){
- "gpll0",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1601,8 +1619,8 @@ static struct clk_branch gcc_gpu_gpll0_div_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_div_clk",
- .parent_names = (const char *[]){
- "gpll0_early_div",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_early_div.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1632,8 +1650,8 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hmss_rbcpr_clk",
- .parent_names = (const char *[]){
- "hmss_rbcpr_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &hmss_rbcpr_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1650,8 +1668,8 @@ static struct clk_branch gcc_mmss_gpll0_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_gpll0_clk",
- .parent_names = (const char *[]){
- "gpll0",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1667,8 +1685,8 @@ static struct clk_branch gcc_mmss_gpll0_div_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_gpll0_div_clk",
- .parent_names = (const char *[]){
- "gpll0_early_div",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_early_div.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1767,8 +1785,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1824,8 +1842,8 @@ static struct clk_branch gcc_qspi_ser_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_qspi_ser_clk",
- .parent_names = (const char *[]){
- "qspi_ser_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &qspi_ser_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1881,8 +1899,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1899,8 +1917,8 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ice_core_clk",
- .parent_names = (const char *[]){
- "sdcc1_ice_core_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1930,8 +1948,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1961,8 +1979,8 @@ static struct clk_branch gcc_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1992,8 +2010,8 @@ static struct clk_branch gcc_ufs_ice_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ice_core_clk",
- .parent_names = (const char *[]){
- "ufs_ice_core_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ufs_ice_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2010,8 +2028,8 @@ static struct clk_branch gcc_ufs_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_aux_clk",
- .parent_names = (const char *[]){
- "ufs_phy_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ufs_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2067,8 +2085,8 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_unipro_core_clk",
- .parent_names = (const char *[]){
- "ufs_unipro_core_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ufs_unipro_core_clk_src.clkr.hw,
},
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
@@ -2085,8 +2103,8 @@ static struct clk_branch gcc_usb20_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb20_master_clk",
- .parent_names = (const char *[]){
- "usb20_master_clk_src"
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb20_master_clk_src.clkr.hw,
},
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
@@ -2103,8 +2121,8 @@ static struct clk_branch gcc_usb20_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb20_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb20_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb20_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2134,8 +2152,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2152,8 +2170,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2196,8 +2214,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]){
- "usb3_phy_aux_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb3_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
new file mode 100644
index 000000000000..bc09736ece76
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -0,0 +1,3544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm6115.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2,
+ P_GPLL0_OUT_EARLY,
+ P_GPLL10_OUT_MAIN,
+ P_GPLL11_OUT_MAIN,
+ P_GPLL3_OUT_EARLY,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_EARLY,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_EARLY,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
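+/* VCO ranges: { min_freq, max_freq, val } as matched by the alpha PLL code. */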
+static struct pll_vco default_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+static struct pll_vco gpll9_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static struct pll_vco gpll10_vco[] = {
+ { 750000000, 1500000000, 1 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
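+/* Post-divider tables: { register value, divisor }. */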
+static const struct clk_div_table post_div_table_gpll0_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_aux2",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* Listed as BRAMMO, but the register layout doesn't really match, so spell it out here. */
+static const u8 clk_gpll9_regs[PLL_OFF_MAX_REGS] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .vco_val = 0x1 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = gpll10_vco,
+ .num_vco = ARRAY_SIZE(gpll10_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll10_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll10_out_main = {
+ .offset = 0xa000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll10_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll10_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll10.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 600MHz configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1F,
+ .alpha = 0x0,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .config_ctl_val = 0x4001055b,
+};
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll11_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll11_out_main = {
+ .offset = 0xb000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll11_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll11_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll11.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll4_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll4_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll4.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll7_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll7_out_main = {
+ .offset = 0x7000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll7_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll7_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll7.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 800MHz configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x29,
+ .alpha = 0xAAAAAAAA,
+ .alpha_hi = 0xAA,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .config_ctl_val = 0x4001055b,
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x3C,
+ .alpha = 0x0,
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(9, 8),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x00004289,
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = gpll9_vco,
+ .num_vco = ARRAY_SIZE(gpll9_vco),
+ .regs = clk_gpll9_regs,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_gpll9_regs,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
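+/*
+ * Each parent_map pairs a P_* index with the hardware mux selector value;
+ * the clk_parent_data table that follows lists the parents in the same order.
+ */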
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL6_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parents_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL9_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_EARLY, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_10[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10_out_main.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_11[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll7_out_main.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_12[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parents_13[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll11_out_main.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
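+
+/*
+ * F(rate, source, pre_div, m, n): the clk-rcg F() macro encodes the
+ * pre-divider as (2 * div - 1), which is how half-integer dividers such
+ * as the 1.5 above are expressed; m = n = 0 means the M/N counter is
+ * unused for that entry.
+ */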
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
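+
+/*
+ * For each RCG, cmd_rcgr is the offset of the CMD register (CFG and the
+ * M/N/D counters follow it), mnd_width is the width of the M/N counter
+ * fields (0 for RCGs without one) and hid_width is the width of the
+ * half-integer divider field.
+ */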
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk_src",
+ .parent_data = gcc_parents_9,
+ .num_parents = ARRAY_SIZE(gcc_parents_9),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
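+
+/*
+ * CLK_OPS_PARENT_ENABLE tells the core to keep both the old and the new
+ * parent enabled around set_rate/set_parent, since the RCG update
+ * handshake only completes while the selected source is actually running.
+ */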
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(268800000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5901c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x59038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
+ F(64000000, P_GPLL9_OUT_MAIN, 1, 1, 9),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_EARLY, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(576000000, P_GPLL9_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(128000000, P_GPLL10_OUT_MAIN, 9, 0, 0),
+ F(135529412, P_GPLL10_OUT_MAIN, 8.5, 0, 0),
+ F(144000000, P_GPLL10_OUT_MAIN, 8, 0, 0),
+ F(153600000, P_GPLL10_OUT_MAIN, 7.5, 0, 0),
+ F(164571429, P_GPLL10_OUT_MAIN, 7, 0, 0),
+ F(177230769, P_GPLL10_OUT_MAIN, 6.5, 0, 0),
+ F(192000000, P_GPLL10_OUT_MAIN, 6, 0, 0),
+ F(209454545, P_GPLL10_OUT_MAIN, 5.5, 0, 0),
+ F(230400000, P_GPLL10_OUT_MAIN, 5, 0, 0),
+ F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+ F(288000000, P_GPLL10_OUT_MAIN, 4, 0, 0),
+ F(329142857, P_GPLL10_OUT_MAIN, 3.5, 0, 0),
+ F(384000000, P_GPLL10_OUT_MAIN, 3, 0, 0),
+ F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_EARLY, 5, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ .cmd_rcgr = 0x52044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x520d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(341333333, P_GPLL6_OUT_EARLY, 1, 4, 9),
+ F(384000000, P_GPLL6_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_data = gcc_parents_10,
+ .num_parents = ARRAY_SIZE(gcc_parents_10),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_EARLY, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_AUX2, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
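+
+/*
+ * The UART-style rates come from the M/N counter, e.g. 7372800 Hz =
+ * 300 MHz (gpll0_out_aux2) * 384 / 15625 with a pre-divider of 1.
+ */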
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
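+
+/*
+ * The QUP init data lives in named structs rather than anonymous compound
+ * literals, presumably so the probe path can adjust them (for example to
+ * switch a serial engine to shared/DFS ops) before registration.
+ */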
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_11,
+ .num_parents = ARRAY_SIZE(gcc_parents_11),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x45020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x45048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4507c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x45060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_EARLY, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
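+
+/*
+ * Read-only post-divider on the mock UTMI clock: clk_regmap_div_ro_ops
+ * only reports the divider value, leaving it as programmed (typically by
+ * firmware).
+ */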
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parents_12,
+ .num_parents = ARRAY_SIZE(gcc_parents_12),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ { }
+};
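+
+/*
+ * The same divider appears for 300 MHz and 384 MHz because GPLL11 itself
+ * is retuned: CLK_SET_RATE_PARENT below propagates the rate request up to
+ * the PLL.
+ */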
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_data = gcc_parents_13,
+ .num_parents = ARRAY_SIZE(gcc_parents_13),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
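+
+/*
+ * Branch clocks: enable_reg/enable_mask gate the branch, halt_reg is the
+ * CBCR status register polled according to halt_check, and
+ * hwcg_reg/hwcg_bit control the hardware clock-gating enable.
+ */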
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
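+
+/*
+ * BRANCH_HALT_VOTED branches are enabled through a shared vote register
+ * (0x79004 or 0x7900c here) instead of their own CBCR; the halt bit is
+ * still polled at halt_reg, and the clock only stops once every voter has
+ * dropped its vote.
+ */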
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
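+
+/*
+ * The always-on AHB/XO clocks are marked CLK_IS_CRITICAL so they are
+ * enabled at registration and never gated; disabling them would likely
+ * stall register access to the multimedia subsystems.
+ */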
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_atb_clk = {
+ .halt_reg = 0x5804c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5804c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_nts_xo_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x58050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_nts_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_2_clk = {
+ .halt_reg = 0x52090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x59018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+ .halt_reg = 0x59050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_ope_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_ope_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_clk = {
+ .halt_reg = 0x5205c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x52084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_csid_clk = {
+ .halt_reg = 0x520ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x2b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+ .reg = 0x17058,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_disp_gpll0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
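+
+/*
+ * GPLL0 export branches like this one have no halt_reg of their own, so
+ * BRANCH_HALT_DELAY inserts a fixed delay after toggling the vote instead
+ * of polling a status bit.
+ */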
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_out_aux2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x36100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x36100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
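+
+/*
+ * halt_check = BRANCH_VOTED (without BRANCH_HALT) polls the halt bit when
+ * enabling but only delays when disabling, since a clock that is still
+ * voted for elsewhere never reports as stopped.
+ */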
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x45014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x45044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x45078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x4501c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x45040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x9f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
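+/*
+ * GDSCs (globally distributed switch controllers) provide the power
+ * domains for the camera, UFS, USB and video blocks below. PWRSTS_OFF_ON
+ * limits each domain to a plain off/on pair; VOTABLE marks switches that
+ * are enabled by voting (shared with other masters) rather than written
+ * directly.
+ */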
+static struct gdsc gcc_camss_top_gdsc = {
+ .gdscr = 0x58004,
+ .pd = {
+ .name = "gcc_camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x45004,
+ .pd = {
+ .name = "gcc_ufs_phy",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "gcc_usb30_prim",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_vcodec0_gdsc = {
+ .gdscr = 0x58098,
+ .pd = {
+ .name = "gcc_vcodec0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_venus_gdsc = {
+ .gdscr = 0x5807c,
+ .pd = {
+ .name = "gcc_venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc = {
+ .gdscr = 0x7d074,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_rt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc = {
+ .gdscr = 0x7d078,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_nrt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
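+/*
+ * Lookup table handed to the common qcom clock code: each slot is indexed
+ * by a GCC_* constant from dt-bindings/clock/qcom,gcc-sm6115.h, so
+ * consumers reference clocks with the same IDs they use in the devicetree.
+ */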
+static struct clk_regmap *gcc_sm6115_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CAMNOC_ATB_CLK] = &gcc_camss_camnoc_atb_clk.clkr,
+ [GCC_CAMSS_CAMNOC_NTS_XO_CLK] = &gcc_camss_camnoc_nts_xo_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr,
+ [GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_AUX2] = &gpll0_out_aux2.clkr,
+ [GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL10_OUT_MAIN] = &gpll10_out_main.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL11_OUT_MAIN] = &gpll11_out_main.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL7_OUT_MAIN] = &gpll7_out_main.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
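+/*
+ * Block-level resets: each entry is the offset of a block's BCR (block
+ * control reset) register; the shared qcom reset controller toggles bit 0
+ * of that register to assert and deassert the reset.
+ */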
+static const struct qcom_reset_map gcc_sm6115_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 },
+ [GCC_SDCC1_BCR] = { 0x38000 },
+ [GCC_SDCC2_BCR] = { 0x1e000 },
+ [GCC_UFS_PHY_BCR] = { 0x45000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
+static struct gdsc *gcc_sm6115_gdscs[] = {
+ [GCC_CAMSS_TOP_GDSC] = &gcc_camss_top_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_VCODEC0_GDSC] = &gcc_vcodec0_gdsc,
+ [GCC_VENUS_GDSC] = &gcc_venus_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc,
+};
+
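+/*
+ * The QUP serial-engine RCGs support hardware DFS (dynamic frequency
+ * switching): per-performance-level configurations set up by firmware,
+ * which the driver discovers at probe time via qcom_cc_register_rcg_dfs().
+ */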
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+};
+
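+/*
+ * A single MMIO regmap covers the whole GCC register space; fast_io makes
+ * regmap use a spinlock rather than a mutex, which suits clock operations
+ * that can run in atomic context.
+ */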
+static const struct regmap_config gcc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm6115_desc = {
+ .config = &gcc_sm6115_regmap_config,
+ .clks = gcc_sm6115_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm6115_clocks),
+ .resets = gcc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm6115_resets),
+ .gdscs = gcc_sm6115_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm6115_gdscs),
+};
+
+static const struct of_device_id gcc_sm6115_match_table[] = {
+ { .compatible = "qcom,gcc-sm6115" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm6115_match_table);
+
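+/*
+ * Probe order matters: the regmap must exist before anything touches
+ * registers, the DFS-capable QUP RCGs are then locked to their firmware
+ * configuration, and gpll8-gpll11 (presumably left unprogrammed by boot
+ * firmware) are configured before qcom_cc_really_probe() registers the
+ * clocks that depend on them.
+ */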
+static int gcc_sm6115_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm6115_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+ clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_sm6115_desc, regmap);
+}
+
+static struct platform_driver gcc_sm6115_driver = {
+ .probe = gcc_sm6115_probe,
+ .driver = {
+ .name = "gcc-sm6115",
+ .of_match_table = gcc_sm6115_match_table,
+ },
+};
+
+static int __init gcc_sm6115_init(void)
+{
+ return platform_driver_register(&gcc_sm6115_driver);
+}
+subsys_initcall(gcc_sm6115_init);
+
+static void __exit gcc_sm6115_exit(void)
+{
+ platform_driver_unregister(&gcc_sm6115_driver);
+}
+module_exit(gcc_sm6115_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM6115 and SM4250 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sm6115");
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
new file mode 100644
index 000000000000..3236706771b1
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -0,0 +1,2584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm6350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
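+/*
+ * Symbolic indices for the clock parents; the parent_map tables further
+ * down translate each of these into the source-select value programmed
+ * into an RCG's CFG register.
+ */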
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_ODD,
+ P_GPLL6_OUT_EVEN,
+ P_GPLL7_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_gpll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
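+/*
+ * Each parent_map pairs a P_* index with the source-select value the RCG
+ * mux expects; the matching clk_parent_data array must list the parents in
+ * the same order. Parents owned by this driver are referenced as clk_hw
+ * pointers, while board-level inputs (bi_tcxo, sleep_clk) come from the
+ * devicetree via fw_name.
+ */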
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6_out_even.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_ODD, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_ODD, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_ODD, 2 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" }
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL6_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll6_out_even.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_ODD, 2 },
+ { P_GPLL7_OUT_MAIN, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+};
+
+static struct clk_regmap_div gcc_gpu_gpll0_main_div_clk_src = {
+ .reg = 0x4514c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_main_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_npu_pll0_main_div_clk_src = {
+ .reg = 0x4ce00,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_npu_pll0_main_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
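+/*
+ * F(rate, source, pre_div, m, n): the pre-divider is encoded as
+ * (2 * div - 1), which is how the half-integer dividers such as 2.5 and
+ * 4.5 in the tables below map onto exact register values.
+ */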
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x30014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x37004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x38004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x39004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x23010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
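+/*
+ * The init data for the DFS-capable QUP RCGs lives in separate, writable
+ * structs: DEFINE_RCG_DFS() pastes "_init" onto the RCG name to find them,
+ * and qcom_cc_register_rcg_dfs() may swap in the DFS ops at probe time.
+ */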
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x21148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x21278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x213a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x214d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x21608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x21738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x22018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x22148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x22278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x223a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x224d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x22608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x4b024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x4b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_ODD, 8, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
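+/*
+ * SDCC2 uses the floor variant of the RCG ops: rate requests are rounded
+ * down to the nearest supported frequency, so the SD card is never clocked
+ * faster than the core asked for.
+ */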
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x2000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_ODD, 8, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x3a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x3a048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3a0b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x3a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_ODD, 3, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
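+/*
+ * Branch (gate) clocks from here on. halt_check selects how the enable
+ * state is confirmed: BRANCH_HALT polls the halt bit, BRANCH_HALT_VOTED
+ * covers gates shared through the voting registers at 0x52000/0x52008,
+ * BRANCH_HALT_DELAY just waits, and BRANCH_HALT_SKIP trusts the write.
+ */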
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x3e014,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3e014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3e014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x3e014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3e014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3e014,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x3e014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3e014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3e014,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3e010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3e010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_nrt_axi_clk = {
+ .halt_reg = 0x17078,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x17078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_throttle_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_rt_axi_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x17024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_throttle_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x2b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x2b008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x2b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1101c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x30000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x30000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x30004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x30008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x2d038,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x2d038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_cc_sleep_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_cc_xo_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_cc_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_axi_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x17034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x37000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x37000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x38000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x39000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x45004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpu_gpll0_main_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x4500c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x4500c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x45014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4c008,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x4c008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dma_cfg_ahb_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dma_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dsp_cfg_ahb_clk = {
+ .halt_reg = 0x4d00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dsp_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4c004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4c004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+ .halt_reg = 0x4c140,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x4c140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4c140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_dma_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_npu_pll0_main_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x21014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x21144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x21274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x213a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x214d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x21604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x21734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x22008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x22014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x22144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x22274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x223a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x224d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x22604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x2200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x22010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x22010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x4b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x4b03c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4b03c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x10140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x3a00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x3a034,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x3a0a4,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a0a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x3a0a4,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a0a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a0a4,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x3a0ac,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a0ac,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a0ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x3a0ac,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a0ac,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a0ac,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x3a014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x3a018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x3a010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x3a09c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x3a09c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3a09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3a09c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x1a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
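+/*
+ * GDSCs (Globally Distributed Switch Controllers) are the power domains
+ * managed through this clock controller; PWRSTS_OFF_ON restricts them to
+ * plain off/on transitions.
+ */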
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x3a004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0xb7040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0xb7044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm6350_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_NRT_AXI_CLK] =
+ &gcc_camera_throttle_nrt_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_RT_AXI_CLK] = &gcc_camera_throttle_rt_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_CC_SLEEP_CLK] = &gcc_disp_cc_sleep_clk.clkr,
+ [GCC_DISP_CC_XO_CLK] = &gcc_disp_cc_xo_clk.clkr,
+ [GCC_DISP_GPLL0_CLK] = &gcc_disp_gpll0_clk.clkr,
+ [GCC_DISP_THROTTLE_AXI_CLK] = &gcc_disp_throttle_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+ [GCC_NPU_BWMON_DMA_CFG_AHB_CLK] = &gcc_npu_bwmon_dma_cfg_ahb_clk.clkr,
+ [GCC_NPU_BWMON_DSP_CFG_AHB_CLK] = &gcc_npu_bwmon_dsp_cfg_ahb_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+ [GCC_NPU_GPLL0_CLK] = &gcc_npu_gpll0_clk.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK] = &gcc_npu_gpll0_div_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_EVEN] = &gpll6_out_even.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] =
+ &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] =
+ &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] =
+ &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC] = &gcc_gpu_gpll0_main_div_clk_src.clkr,
+ [GCC_NPU_PLL0_MAIN_DIV_CLK_SRC] = &gcc_npu_pll0_main_div_clk_src.clkr,
+};
+
+static struct gdsc *gcc_sm6350_gdscs[] = {
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+};
+
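+/* Block resets (BCRs), exposed through the reset controller framework */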
+static const struct qcom_reset_map gcc_sm6350_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1d000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1e000 },
+ [GCC_SDCC1_BCR] = { 0x4b000 },
+ [GCC_SDCC2_BCR] = { 0x20000 },
+ [GCC_UFS_PHY_BCR] = { 0x3a000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x1c008 },
+};
+
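+/*
+ * The QUP serial-engine clock sources support DFS (Dynamic Frequency
+ * Switching); qcom_cc_register_rcg_dfs() switches them over to DFS-aware
+ * ops at probe time.
+ */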
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static const struct regmap_config gcc_sm6350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xbf030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm6350_desc = {
+ .config = &gcc_sm6350_regmap_config,
+ .clks = gcc_sm6350_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm6350_clocks),
+ .resets = gcc_sm6350_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm6350_resets),
+ .gdscs = gcc_sm6350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm6350_gdscs),
+};
+
+static const struct of_device_id gcc_sm6350_match_table[] = {
+ { .compatible = "qcom,gcc-sm6350" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm6350_match_table);
+
+static int gcc_sm6350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm6350_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x4cf00, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x45f00, 0x3, 0x3);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+	return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
+}
+
+static struct platform_driver gcc_sm6350_driver = {
+ .probe = gcc_sm6350_probe,
+ .driver = {
+ .name = "gcc-sm6350",
+ .of_match_table = gcc_sm6350_match_table,
+ },
+};
+
+static int __init gcc_sm6350_init(void)
+{
+ return platform_driver_register(&gcc_sm6350_driver);
+}
+core_initcall(gcc_sm6350_init);
+
+static void __exit gcc_sm6350_exit(void)
+{
+ platform_driver_unregister(&gcc_sm6350_driver);
+}
+module_exit(gcc_sm6350_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM6350 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
new file mode 100644
index 000000000000..9a832f2bcf49
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sc7280.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPU_GPLL0_CLK_SRC,
+ P_GCC_GPU_GPLL0_DIV_CLK_SRC,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
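+/* Single Lucid VCO band: 249.6 MHz to 2 GHz */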
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+/* 500MHz Configuration */
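+/* 19.2 MHz * (0x1a + 0xaaa / 2^16) ≈ 500 MHz, assuming a 16-bit alpha width */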
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1A,
+ .alpha = 0xAAA,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src", },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src", },
+};
+
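+/*
+ * F(rate, parent, pre_div, m, n); the F() macro encodes half-integer
+ * pre-dividers such as 1.5 and 2.5 as (2 * pre_div - 1).
+ */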
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GCC_GPU_GPLL0_DIV_CLK_SRC, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GCC_GPU_GPLL0_DIV_CLK_SRC, 2, 0, 0),
+ F(240000000, P_GCC_GPU_GPLL0_CLK_SRC, 2.5, 0, 0),
+ F(300000000, P_GCC_GPU_GPLL0_CLK_SRC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
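+/* Read-only dividers: the kernel observes these but never reprograms them */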
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *gpu_cc_sc7280_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gx_gdsc,
+};
+
+static struct clk_regmap *gpu_cc_sc7280_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sc7280_desc = {
+ .config = &gpu_cc_sc7280_regmap_config,
+ .clks = gpu_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sc7280_clocks),
+	.gdscs = gpu_cc_sc7280_gdscs,
+	.num_gdscs = ARRAY_SIZE(gpu_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc7280_match_table);
+
+static int gpu_cc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+	/*
+	 * Keep the following clocks always on:
+	 * GPU_CC_CB_CLK (0x1170), GPU_CC_CX_GMU_CLK (0x1098)
+	 */
+ regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sc7280_driver = {
+ .probe = gpu_cc_sc7280_probe,
+ .driver = {
+ .name = "gpu_cc-sc7280",
+ .of_match_table = gpu_cc_sc7280_match_table,
+ },
+};
+
+static int __init gpu_cc_sc7280_init(void)
+{
+ return platform_driver_register(&gpu_cc_sc7280_driver);
+}
+subsys_initcall(gpu_cc_sc7280_init);
+
+static void __exit gpu_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sc7280_driver);
+}
+module_exit(gpu_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index 80fb6f73601d..8422fd047493 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -82,6 +82,14 @@ static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src_sc8180x[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gpu_cc_gmu_clk_src = {
.cmd_rcgr = 0x1120,
.mnd_width = 0,
@@ -277,6 +285,7 @@ static const struct qcom_cc_desc gpu_cc_sm8150_desc = {
};
static const struct of_device_id gpu_cc_sm8150_match_table[] = {
+ { .compatible = "qcom,sc8180x-gpucc" },
{ .compatible = "qcom,sm8150-gpucc" },
{ }
};
@@ -290,6 +299,9 @@ static int gpu_cc_sm8150_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
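+	/* The SC8180X GMU clock uses a different frequency table */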
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sc8180x-gpucc"))
+ gpu_cc_gmu_clk_src.freq_tbl = ftbl_gpu_cc_gmu_clk_src_sc8180x;
+
clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap);
diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
index f5e31e692b9b..96f476f24eb2 100644
--- a/drivers/clk/qcom/lpass-gfm-sm8250.c
+++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
@@ -251,15 +251,18 @@ static int lpass_gfm_clk_driver_probe(struct platform_device *pdev)
if (IS_ERR(cc->base))
return PTR_ERR(cc->base);
- pm_runtime_enable(dev);
- err = pm_clk_create(dev);
+ err = devm_pm_runtime_enable(dev);
if (err)
- goto pm_clk_err;
+ return err;
+
+ err = devm_pm_clk_create(dev);
+ if (err)
+ return err;
err = of_pm_clk_add_clks(dev);
if (err < 0) {
dev_dbg(dev, "Failed to get lpass core voting clocks\n");
- goto clk_reg_err;
+ return err;
}
for (i = 0; i < data->onecell_data->num; i++) {
@@ -273,22 +276,16 @@ static int lpass_gfm_clk_driver_probe(struct platform_device *pdev)
err = devm_clk_hw_register(dev, &data->gfm_clks[i]->hw);
if (err)
- goto clk_reg_err;
+ return err;
}
err = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
data->onecell_data);
if (err)
- goto clk_reg_err;
+ return err;
return 0;
-
-clk_reg_err:
- pm_clk_destroy(dev);
-pm_clk_err:
- pm_runtime_disable(dev);
- return err;
}
static const struct of_device_id lpass_gfm_clk_match_table[] = {
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 2e0ecc38efdd..ac09b7b840ab 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -356,32 +356,18 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
.num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
};
-static void lpass_pm_runtime_disable(void *data)
-{
- pm_runtime_disable(data);
-}
-
-static void lpass_pm_clk_destroy(void *data)
-{
- pm_clk_destroy(data);
-}
-
static int lpass_create_pm_clks(struct platform_device *pdev)
{
int ret;
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
- pm_runtime_enable(&pdev->dev);
- ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_runtime_disable, &pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_clk_destroy, &pdev->dev);
+ ret = devm_pm_clk_create(&pdev->dev);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
new file mode 100644
index 000000000000..89c5f5fa7d9a
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8994.c
@@ -0,0 +1,2620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8994.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_MMPLL0,
+ P_MMPLL1,
+ P_MMPLL3,
+ P_MMPLL4,
+ P_MMPLL5, /* Is this one even used by anything? Downstream doesn't tell. */
+ P_DSI0PLL,
+ P_DSI1PLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL_BYTE,
+ P_HDMIPLL,
+};
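+
+/*
+ * Each parent_map table pairs the mux selector values with the entries of
+ * the clk_parent_data array that follows it; parents owned by other clock
+ * controllers are resolved by fw_name.
+ */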
+static const struct parent_map mmcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 }
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+};
+
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 3 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "hdmipll" },
+};
+
+static const struct parent_map mmcc_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
+};
+
+static const struct clk_parent_data mmcc_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+ { .fw_name = "dsi1pll" },
+};
+
+static const struct parent_map mmcc_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
+};
+
+static const struct clk_parent_data mmcc_xo_dsibyte[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte" },
+};
+
+static struct pll_vco mmpll_p_vco[] = {
+ { 250000000, 500000000, 3 },
+ { 500000000, 1000000000, 2 },
+ { 1000000000, 1500000000, 1 },
+ { 1500000000, 2000000000, 0 },
+};
+
+static struct pll_vco mmpll_t_vco[] = {
+ { 500000000, 1500000000, 0 },
+};
+
+static const struct alpha_pll_config mmpll_p_config = {
+ .post_div_mask = 0xf00,
+};
+
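+/*
+ * Each MMPLL is modelled as an "early" alpha PLL output feeding a
+ * post-divider clock that lives at the same register offset.
+ */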
+static struct clk_alpha_pll mmpll0_early = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr = {
+ .enable_reg = 0x100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0_early",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll0_early.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll1_early = {
+ .offset = 0x30,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr = {
+ .enable_reg = 0x100,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1_early",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll1 = {
+ .offset = 0x30,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll1_early.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll3_early = {
+ .offset = 0x60,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3_early",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll3 = {
+ .offset = 0x60,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll3_early.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll4_early = {
+ .offset = 0x90,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll_t_vco,
+ .num_vco = ARRAY_SIZE(mmpll_t_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4_early",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll4 = {
+ .offset = 0x90,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll4_early.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct parent_map mmcc_xo_gpll0_mmpll1_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_MMPLL1, 2 }
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_mmpll1[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll1.clkr.hw },
+};
+
+static const struct parent_map mmcc_xo_gpll0_mmpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_MMPLL0, 1 }
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_mmpll0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll0.clkr.hw },
+};
+
+static const struct parent_map mmcc_xo_gpll0_mmpll0_mmpll3_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL3, 3 }
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_mmpll0_mmpll3[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+};
+
+static const struct parent_map mmcc_xo_gpll0_mmpll0_mmpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 3 }
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_mmpll0_mmpll4[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+};
+
+static struct clk_alpha_pll mmpll5_early = {
+ .offset = 0xc0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5_early",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll5 = {
+ .offset = 0xc0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll5_early.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+ /* Note: There might be more frequencies desired here. */
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(80000000, P_MMPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ahb_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(333430000, P_MMPLL1, 3.5, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_axi_clk_src_8992[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(404000000, P_MMPLL1, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 axi_clk_src = {
+ .cmd_rcgr = 0x5040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll1_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "axi_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll1,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_1_2_3_clk_src[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_csi0_1_2_3_clk_src_8992[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(66670000, P_GPLL0, 9, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(510000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src_8992[] = {
+ F(66670000, P_GPLL0, 9, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(510000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll3_map,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(80000000, P_GPLL0, 7.5, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(533330000, P_MMPLL0, 1.5, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_vfe0_1_clk_src_8992[] = {
+ F(80000000, P_GPLL0, 7.5, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe1_clk_src[] = {
+ F(80000000, P_GPLL0, 7.5, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(533330000, P_MMPLL0, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_vfe1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ F(640000000, P_MMPLL4, 1.5, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src_8992[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(640000000, P_MMPLL4, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_1_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg1_clk_src = {
+ .cmd_rcgr = 0x3520,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_jpeg0_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg1_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg2_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg2_clk_src = {
+ .cmd_rcgr = 0x3540,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_jpeg2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg2_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi2phytimer_clk_src[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi2phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_fd_core_clk_src[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fd_core_clk_src = {
+ .cmd_rcgr = 0x3b00,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_fd_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fd_core_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(85710000, P_GPLL0, 7, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(171430000, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src_8992[] = {
+ F(85710000, P_GPLL0, 7, 0, 0),
+ F(171430000, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_ocmemnoc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_ocmemnoc_clk_src_8992[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ocmemnoc_clk_src = {
+ .cmd_rcgr = 0x5090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_ocmemnoc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ocmemnoc_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mmss_gp0_1_clk_src[] = {
+ F(10000, P_XO, 16, 10, 120),
+ F(24000, P_GPLL0, 16, 1, 50),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(12000000, P_GPLL0, 10, 1, 5),
+ F(13000000, P_GPLL0, 4, 13, 150),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ { }
+};
+
+static struct clk_rcg2 mmss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_mmss_gp0_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_gp0_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mmss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_mmss_gp0_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_gp1_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_jpeg0_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg_dma_clk_src = {
+ .cmd_rcgr = 0x3560,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_mmpll4_map,
+ .freq_tbl = ftbl_jpeg0_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg_dma_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0_mmpll4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0_mmpll4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mclk0_1_2_3_clk_src[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(8000000, P_GPLL0, 15, 1, 5),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_MMPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_MMPLL0, 5, 1, 5),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(64000000, P_MMPLL0, 12.5, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_mclk0_clk_src_8992[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_MMPLL4, 10, 1, 16),
+ F(8000000, P_MMPLL4, 10, 1, 12),
+ F(9600000, P_XO, 2, 0, 0),
+ F(12000000, P_MMPLL4, 10, 1, 8),
+ F(16000000, P_MMPLL4, 10, 1, 6),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_MMPLL4, 10, 1, 4),
+ F(32000000, P_MMPLL4, 10, 1, 3),
+ F(48000000, P_MMPLL4, 10, 1, 2),
+ F(64000000, P_MMPLL4, 15, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_mclk1_2_3_clk_src_8992[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_MMPLL4, 10, 1, 16),
+ F(8000000, P_MMPLL4, 10, 1, 12),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_MMPLL4, 10, 1, 6),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_MMPLL4, 10, 1, 4),
+ F(32000000, P_MMPLL4, 10, 1, 3),
+ F(48000000, P_MMPLL4, 10, 1, 2),
+ F(64000000, P_MMPLL4, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_mclk0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_mclk0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_mclk0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_mclk0_1_2_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_1phytimer_clk_src[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_mmpll0_map,
+ .freq_tbl = ftbl_csi0_1phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = mmcc_xo_gpll0_mmpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_mmpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
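+/*
+ * The single rate-less entry below always selects HDMIPLL; with
+ * CLK_SET_RATE_PARENT set, the requested rate is simply passed on to
+ * the PLL.
+ */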
+static const struct freq_tbl extpclk_freq_tbl[] = {
+ { .src = P_HDMIPLL },
+ { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_hdmi_map,
+ .freq_tbl = extpclk_freq_tbl,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "extpclk_clk_src",
+ .parent_data = mmss_xo_hdmi,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+ .cmd_rcgr = 0x2100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_hdmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x4090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_map,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_data = mmcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
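+/*
+ * The branch clocks below follow the common qcom layout: the enable bit
+ * and the halt status share one CBCR register, so halt_reg and
+ * enable_reg always hold the same address.
+ */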
+static struct clk_branch camss_ahb_clk = {
+ .halt_reg = 0x348c,
+ .clkr = {
+ .enable_reg = 0x348c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_cci_clk = {
+ .halt_reg = 0x3344,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_axi_clk = {
+ .halt_reg = 0x36c4,
+ .clkr = {
+ .enable_reg = 0x36c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phy_clk = {
+ .halt_reg = 0x30c4,
+ .clkr = {
+ .enable_reg = 0x30c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phy_clk = {
+ .halt_reg = 0x3134,
+ .clkr = {
+ .enable_reg = 0x3134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phy_clk = {
+ .halt_reg = 0x3194,
+ .clkr = {
+ .enable_reg = 0x3194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3phy_clk = {
+ .halt_reg = 0x31f4,
+ .clkr = {
+ .enable_reg = 0x31f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3phy_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp0_clk = {
+ .halt_reg = 0x3444,
+ .clkr = {
+ .enable_reg = 0x3444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mmss_gp0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp1_clk = {
+ .halt_reg = 0x3474,
+ .clkr = {
+ .enable_reg = 0x3474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mmss_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_dma_clk = {
+ .halt_reg = 0x35c0,
+ .clkr = {
+ .enable_reg = 0x35c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_dma_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg_dma_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg1_clk = {
+ .halt_reg = 0x35ac,
+ .clkr = {
+ .enable_reg = 0x35ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg2_clk = {
+ .halt_reg = 0x35b0,
+ .clkr = {
+ .enable_reg = 0x35b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy0_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy0_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy1_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy1_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy2_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy2_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_axi_clk = {
+ .halt_reg = 0x36bc,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch fd_ahb_clk = {
+ .halt_reg = 0x3b74,
+ .clkr = {
+ .enable_reg = 0x3b74,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch fd_axi_clk = {
+ .halt_reg = 0x3b70,
+ .clkr = {
+ .enable_reg = 0x3b70,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch fd_core_clk = {
+ .halt_reg = 0x3b68,
+ .clkr = {
+ .enable_reg = 0x3b68,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_core_clk",
+ .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch fd_core_uar_clk = {
+ .halt_reg = 0x3b6c,
+ .clkr = {
+ .enable_reg = 0x3b6c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+ .halt_reg = 0x2324,
+ .clkr = {
+ .enable_reg = 0x2324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_extpclk_clk",
+ .parent_hws = (const struct clk_hw *[]){ &extpclk_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+ .halt_reg = 0x230c,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+ .halt_reg = 0x2338,
+ .clkr = {
+ .enable_reg = 0x2338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &hdmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_misc_ahb_clk = {
+ .halt_reg = 0x502c,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_misc_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_axi_clk = {
+ .halt_reg = 0x506c,
+ .clkr = {
+ .enable_reg = 0x506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ /* Gating this clock will wreak havoc across the MMSS! */
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_s0_axi_clk = {
+ .halt_reg = 0x5064,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_s0_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemcx_ocmemnoc_clk = {
+ .halt_reg = 0x4058,
+ .clkr = {
+ .enable_reg = 0x4058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemcx_ocmemnoc_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ocmemnoc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_gfx3d_clk = {
+ .halt_reg = 0x4028,
+ .clkr = {
+ .enable_reg = 0x4028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "oxili_gfx3d_clk_src",
+ .name = "oxili_gfx3d_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_rbbmtimer_clk = {
+ .halt_reg = 0x40b0,
+ .clkr = {
+ .enable_reg = 0x40b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_rbbmtimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rbbmtimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxilicx_ahb_clk = {
+ .halt_reg = 0x403c,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxilicx_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ahb_clk = {
+ .halt_reg = 0x1030,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_axi_clk = {
+ .halt_reg = 0x1034,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ocmemnoc_clk = {
+ .halt_reg = 0x1038,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ocmemnoc_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ocmemnoc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_vcodec0_clk = {
+ .halt_reg = 0x1028,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vcodec0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_core0_vcodec_clk = {
+ .halt_reg = 0x1048,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_core0_vcodec_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vcodec0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_core1_vcodec_clk = {
+ .halt_reg = 0x104c,
+ .clkr = {
+ .enable_reg = 0x104c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_core1_vcodec_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vcodec0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_core2_vcodec_clk = {
+ .halt_reg = 0x1054,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_core2_vcodec_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vcodec0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
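+/*
+ * For the GDSCs below, cxcs lists the CBCR registers of clocks that are
+ * force-enabled while the power domain changes state; cxc_count must
+ * match the number of entries in that array.
+ */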
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1038, 0x1034, 0x1048 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x1040,
+ .cxcs = (unsigned int []){ 0x1048 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x1044,
+ .cxcs = (unsigned int []){ 0x104c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc venus_core2_gdsc = {
+ .gdscr = 0x1050,
+ .cxcs = (unsigned int []){ 0x1054 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x2310, 0x231c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x34a0,
+ .cxcs = (unsigned int []){ 0x3704, 0x3714, 0x3494 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .cxcs = (unsigned int []){ 0x35a8 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "jpeg_gdsc",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .cxcs = (unsigned int []){ 0x36bc },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe_gdsc",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .cxcs = (unsigned int []){ 0x36c4, 0x36b0 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "cpp_gdsc",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc fd_gdsc = {
+ .gdscr = 0x3b64,
+ .cxcs = (unsigned int []){ 0x3b70, 0x3b68 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "fd_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc oxili_gx_gdsc = {
+ .gdscr = 0x4024,
+ .cxcs = (unsigned int []){ 0x4028 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &oxili_cx_gdsc.pd,
+ .flags = CLAMP_IO,
+ .supply = "VDD_GFX",
+};
+
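+/*
+ * Entries NULLed out at probe time (for MSM8992) are skipped during
+ * registration, so the index space can stay common to both SoCs.
+ */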
+static struct clk_regmap *mmcc_msm8994_clocks[] = {
+ [MMPLL0_EARLY] = &mmpll0_early.clkr,
+ [MMPLL0_PLL] = &mmpll0.clkr,
+ [MMPLL1_EARLY] = &mmpll1_early.clkr,
+ [MMPLL1_PLL] = &mmpll1.clkr,
+ [MMPLL3_EARLY] = &mmpll3_early.clkr,
+ [MMPLL3_PLL] = &mmpll3.clkr,
+ [MMPLL4_EARLY] = &mmpll4_early.clkr,
+ [MMPLL4_PLL] = &mmpll4.clkr,
+ [MMPLL5_EARLY] = &mmpll5_early.clkr,
+ [MMPLL5_PLL] = &mmpll5.clkr,
+ [AHB_CLK_SRC] = &ahb_clk_src.clkr,
+ [AXI_CLK_SRC] = &axi_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
+ [JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [OCMEMNOC_CLK_SRC] = &ocmemnoc_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [MMSS_GP0_CLK_SRC] = &mmss_gp0_clk_src.clkr,
+ [MMSS_GP1_CLK_SRC] = &mmss_gp1_clk_src.clkr,
+ [JPEG_DMA_CLK_SRC] = &jpeg_dma_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+ [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+ [CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+ [CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+ [CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+ [CAMSS_VFE_CPP_AXI_CLK] = &camss_vfe_cpp_axi_clk.clkr,
+ [CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+ [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_JPEG_DMA_CLK] = &camss_jpeg_dma_clk.clkr,
+ [CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+ [CAMSS_JPEG_JPEG1_CLK] = &camss_jpeg_jpeg1_clk.clkr,
+ [CAMSS_JPEG_JPEG2_CLK] = &camss_jpeg_jpeg2_clk.clkr,
+ [CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+ [CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+ [CAMSS_PHY2_CSI2PHYTIMER_CLK] = &camss_phy2_csi2phytimer_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+ [CAMSS_VFE_VFE1_CLK] = &camss_vfe_vfe1_clk.clkr,
+ [CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+ [CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+ [FD_AHB_CLK] = &fd_ahb_clk.clkr,
+ [FD_AXI_CLK] = &fd_axi_clk.clkr,
+ [FD_CORE_CLK] = &fd_core_clk.clkr,
+ [FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+ [MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
+ [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+ [MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+ [MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+ [OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+ [OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+ [OXILI_RBBMTIMER_CLK] = &oxili_rbbmtimer_clk.clkr,
+ [OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+ [VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+ [VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+ [VENUS0_OCMEMNOC_CLK] = &venus0_ocmemnoc_clk.clkr,
+ [VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+ [VENUS0_CORE0_VCODEC_CLK] = &venus0_core0_vcodec_clk.clkr,
+ [VENUS0_CORE1_VCODEC_CLK] = &venus0_core1_vcodec_clk.clkr,
+ [VENUS0_CORE2_VCODEC_CLK] = &venus0_core2_vcodec_clk.clkr,
+};
+
+static struct gdsc *mmcc_msm8994_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [VENUS_CORE2_GDSC] = &venus_core2_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc,
+ [OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [FD_GDSC] = &fd_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_msm8994_resets[] = {
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+};
+
+static const struct regmap_config mmcc_msm8994_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5200,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc mmcc_msm8994_desc = {
+ .config = &mmcc_msm8994_regmap_config,
+ .clks = mmcc_msm8994_clocks,
+ .num_clks = ARRAY_SIZE(mmcc_msm8994_clocks),
+ .resets = mmcc_msm8994_resets,
+ .num_resets = ARRAY_SIZE(mmcc_msm8994_resets),
+ .gdscs = mmcc_msm8994_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8994_gdscs),
+};
+
+static const struct of_device_id mmcc_msm8994_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8992" },
+ { .compatible = "qcom,mmcc-msm8994" }, /* V2 and V2.1 */
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8994_match_table);
+
+static int mmcc_msm8994_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,mmcc-msm8992")) {
+		/* MSM8992 has fewer clocks, and some have different frequency tables */
+ mmcc_msm8994_desc.clks[CAMSS_JPEG_JPEG1_CLK] = NULL;
+ mmcc_msm8994_desc.clks[CAMSS_JPEG_JPEG2_CLK] = NULL;
+ mmcc_msm8994_desc.clks[FD_CORE_CLK_SRC] = NULL;
+ mmcc_msm8994_desc.clks[FD_CORE_CLK] = NULL;
+ mmcc_msm8994_desc.clks[FD_CORE_UAR_CLK] = NULL;
+ mmcc_msm8994_desc.clks[FD_AXI_CLK] = NULL;
+ mmcc_msm8994_desc.clks[FD_AHB_CLK] = NULL;
+ mmcc_msm8994_desc.clks[JPEG1_CLK_SRC] = NULL;
+ mmcc_msm8994_desc.clks[JPEG2_CLK_SRC] = NULL;
+ mmcc_msm8994_desc.clks[VENUS0_CORE2_VCODEC_CLK] = NULL;
+
+ mmcc_msm8994_desc.gdscs[FD_GDSC] = NULL;
+ mmcc_msm8994_desc.gdscs[VENUS_CORE2_GDSC] = NULL;
+
+ axi_clk_src.freq_tbl = ftbl_axi_clk_src_8992;
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_8992;
+ csi0_clk_src.freq_tbl = ftbl_csi0_1_2_3_clk_src_8992;
+ csi1_clk_src.freq_tbl = ftbl_csi0_1_2_3_clk_src_8992;
+ csi2_clk_src.freq_tbl = ftbl_csi0_1_2_3_clk_src_8992;
+ csi3_clk_src.freq_tbl = ftbl_csi0_1_2_3_clk_src_8992;
+ mclk0_clk_src.freq_tbl = ftbl_mclk0_clk_src_8992;
+ mclk1_clk_src.freq_tbl = ftbl_mclk1_2_3_clk_src_8992;
+ mclk2_clk_src.freq_tbl = ftbl_mclk1_2_3_clk_src_8992;
+ mclk3_clk_src.freq_tbl = ftbl_mclk1_2_3_clk_src_8992;
+ mdp_clk_src.freq_tbl = ftbl_mdp_clk_src_8992;
+ ocmemnoc_clk_src.freq_tbl = ftbl_ocmemnoc_clk_src_8992;
+ vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_8992;
+ vfe0_clk_src.freq_tbl = ftbl_vfe0_1_clk_src_8992;
+ vfe1_clk_src.freq_tbl = ftbl_vfe0_1_clk_src_8992;
+ }
+
+ regmap = qcom_cc_map(pdev, &mmcc_msm8994_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&mmpll0_early, regmap, &mmpll_p_config);
+ clk_alpha_pll_configure(&mmpll1_early, regmap, &mmpll_p_config);
+ clk_alpha_pll_configure(&mmpll3_early, regmap, &mmpll_p_config);
+ clk_alpha_pll_configure(&mmpll5_early, regmap, &mmpll_p_config);
+
+ return qcom_cc_really_probe(pdev, &mmcc_msm8994_desc, regmap);
+}
+
+static struct platform_driver mmcc_msm8994_driver = {
+ .probe = mmcc_msm8994_probe,
+ .driver = {
+ .name = "mmcc-msm8994",
+ .of_match_table = mmcc_msm8994_match_table,
+ },
+};
+module_platform_driver(mmcc_msm8994_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8994 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8994");
diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
index 673fa1a4f734..5a1407440662 100644
--- a/drivers/clk/qcom/mss-sc7180.c
+++ b/drivers/clk/qcom/mss-sc7180.c
@@ -73,36 +73,23 @@ static int mss_sc7180_probe(struct platform_device *pdev)
{
int ret;
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
- goto disable_pm_runtime;
+ return ret;
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
ret = pm_clk_add(&pdev->dev, "cfg_ahb");
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
+ return ret;
}
ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
if (ret < 0)
- goto destroy_pm_clk;
-
- return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-static int mss_sc7180_remove(struct platform_device *pdev)
-{
- pm_clk_destroy(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return ret;
return 0;
}
@@ -119,7 +106,6 @@ MODULE_DEVICE_TABLE(of, mss_sc7180_match_table);
static struct platform_driver mss_sc7180_driver = {
.probe = mss_sc7180_probe,
- .remove = mss_sc7180_remove,
.driver = {
.name = "sc7180-mss",
.of_match_table = mss_sc7180_match_table,
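The conversion above (repeated for q6sstop and turingcc below) swaps manual cleanup for device-managed resources: devm_pm_runtime_enable() and devm_pm_clk_create() register their own undo actions, so the error labels and the entire .remove callback can go away. A minimal sketch of what the devm wrapper amounts to, assuming it is built on devm_add_action_or_reset() as the pm_runtime.h helpers generally are (the _sketch name is illustrative, not upstream):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static void pm_runtime_disable_action(void *data)
	{
		/* runs automatically on probe failure and on unbind */
		pm_runtime_disable(data);
	}

	static int devm_pm_runtime_enable_sketch(struct device *dev)
	{
		pm_runtime_enable(dev);
		return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
	}

Devres actions run in reverse order of registration, which preserves the pm_clk_destroy()-before-pm_runtime_disable() ordering the old error path and remove callback had.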
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
index 723f932fbf7d..507386bee07d 100644
--- a/drivers/clk/qcom/q6sstop-qcs404.c
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -159,15 +159,18 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
const struct qcom_cc_desc *desc;
int ret;
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
- goto disable_pm_runtime;
+ return ret;
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
ret = pm_clk_add(&pdev->dev, NULL);
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
+ return ret;
}
q6sstop_regmap_config.name = "q6sstop_tcsr";
@@ -175,30 +178,14 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 1, desc);
if (ret)
- goto destroy_pm_clk;
+ return ret;
q6sstop_regmap_config.name = "q6sstop_cc";
desc = &q6sstop_qcs404_desc;
ret = qcom_cc_probe_by_index(pdev, 0, desc);
if (ret)
- goto destroy_pm_clk;
-
- return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-static int q6sstopcc_qcs404_remove(struct platform_device *pdev)
-{
- pm_clk_destroy(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return ret;
return 0;
}
@@ -209,7 +196,6 @@ static const struct dev_pm_ops q6sstopcc_pm_ops = {
static struct platform_driver q6sstopcc_qcs404_driver = {
.probe = q6sstopcc_qcs404_probe,
- .remove = q6sstopcc_qcs404_remove,
.driver = {
.name = "qcs404-q6sstopcc",
.of_match_table = q6sstopcc_qcs404_match_table,
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
index 4cfbbf5bf4d9..4543bda793f4 100644
--- a/drivers/clk/qcom/turingcc-qcs404.c
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -110,36 +110,23 @@ static int turingcc_probe(struct platform_device *pdev)
{
int ret;
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
- goto disable_pm_runtime;
+ return ret;
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
ret = pm_clk_add(&pdev->dev, NULL);
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
+ return ret;
}
ret = qcom_cc_probe(pdev, &turingcc_desc);
if (ret < 0)
- goto destroy_pm_clk;
-
- return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-static int turingcc_remove(struct platform_device *pdev)
-{
- pm_clk_destroy(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return ret;
return 0;
}
@@ -156,7 +143,6 @@ MODULE_DEVICE_TABLE(of, turingcc_match_table);
static struct platform_driver turingcc_driver = {
.probe = turingcc_probe,
- .remove = turingcc_remove,
.driver = {
.name = "qcs404-turingcc",
.of_match_table = turingcc_match_table,
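All three converted drivers rely on pm_clk to gate their interface clock with runtime PM: a clock added via pm_clk_add() is switched by the runtime-PM callbacks. The dev_pm_ops wiring lives outside these hunks (q6sstopcc_pm_ops is visible in context above), but it is presumably of this shape:

	static const struct dev_pm_ops turingcc_pm_ops = {
		SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
	};

With that in place, pm_clk_suspend()/pm_clk_resume() disable and re-enable every clock on the device's pm_clk list as the device runtime-suspends and resumes.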
diff --git a/drivers/clk/qcom/videocc-sc7280.c b/drivers/clk/qcom/videocc-sc7280.c
new file mode 100644
index 000000000000..615695d82319
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sc7280.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_EVEN,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 400MHz Configuration */
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x14,
+ .alpha = 0xD555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+ F(133333333, P_VIDEO_PLL0_OUT_EVEN, 3, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(335000000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(424000000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_iris_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_iris_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x701c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_iris_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_axi_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvsc_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_ctl_axi_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvsc_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0x7034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0x3004,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mvsc_gdsc = {
+ .gdscr = 0x2004,
+ .pd = {
+ .name = "mvsc_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sc7280_clocks[] = {
+ [VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+ [VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+ [VIDEO_CC_MVS0_AXI_CLK] = &video_cc_mvs0_axi_clk.clkr,
+ [VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+ [VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+ [VIDEO_CC_MVSC_CTL_AXI_CLK] = &video_cc_mvsc_ctl_axi_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sc7280_gdscs[] = {
+ [MVS0_GDSC] = &mvs0_gdsc,
+ [MVSC_GDSC] = &mvsc_gdsc,
+};
+
+static const struct regmap_config video_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sc7280_desc = {
+ .config = &video_cc_sc7280_regmap_config,
+ .clks = video_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sc7280_clocks),
+ .gdscs = video_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id video_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sc7280_match_table);
+
+static int video_cc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ return qcom_cc_really_probe(pdev, &video_cc_sc7280_desc, regmap);
+}
+
+static struct platform_driver video_cc_sc7280_driver = {
+ .probe = video_cc_sc7280_probe,
+ .driver = {
+ .name = "video_cc-sc7280",
+ .of_match_table = video_cc_sc7280_match_table,
+ },
+};
+
+static int __init video_cc_sc7280_init(void)
+{
+ return platform_driver_register(&video_cc_sc7280_driver);
+}
+subsys_initcall(video_cc_sc7280_init);
+
+static void __exit video_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&video_cc_sc7280_driver);
+}
+module_exit(video_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC sc7280 Driver");
+MODULE_LICENSE("GPL v2");
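Two details in the new driver are worth decoding: the PLL configuration and the F() frequency-table rows. With SC7280's 19.2 MHz TCXO, the "400MHz Configuration" is the L/ALPHA multiplier, and each F() row names a target rate, a parent from the parent_map, and a pre-divider (M/N are unused here since mnd_width = 0). A hedged worked example; the F() encoding is paraphrased from the qcom clk-rcg headers rather than quoted:

	/*
	 * video_pll0: l = 0x14 = 20, alpha = 0xD555, so the multiplier is
	 * roughly 20 + 0xD555/0x10000 ~= 20.8333, and
	 * 19.2 MHz * 20.8333 ~= 400 MHz, matching the comment above the config.
	 *
	 * F(240000000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0) asks for 240 MHz from
	 * the PLL through a divide-by-2; with clk_rcg2_shared_ops and
	 * CLK_SET_RATE_PARENT, the PLL itself is re-rated to 480 MHz. The
	 * higher rows (335/424/460 MHz, same divider) likewise imply PLL
	 * rates of 670/848/920 MHz rather than different dividers.
	 */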
diff --git a/drivers/clk/ralink/clk-mt7621.c b/drivers/clk/ralink/clk-mt7621.c
index 857da1e274be..a2c045390f00 100644
--- a/drivers/clk/ralink/clk-mt7621.c
+++ b/drivers/clk/ralink/clk-mt7621.c
@@ -131,14 +131,7 @@ static int mt7621_gate_ops_init(struct device *dev,
struct mt7621_gate *sclk)
{
struct clk_init_data init = {
- /*
- * Until now no clock driver existed so
- * these SoC drivers are not prepared
- * yet for the clock. We don't want kernel to
- * disable anything so we add CLK_IS_CRITICAL
- * flag here.
- */
- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
.parent_names = &sclk->parent_name,
.ops = &mt7621_gate_ops,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 7b450650bcae..6d0280751bb1 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -153,9 +153,7 @@ config CLK_R8A779A0
select CLK_RENESAS_CPG_MSSR
config CLK_R9A06G032
- bool "Renesas R9A06G032 clock driver"
- help
- This is a driver for R9A06G032 clocks
+ bool "RZ/N1D clock support" if COMPILE_TEST
config CLK_R9A07G044
bool "RZ/G2L clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 5c6c5c721d98..7d018700d08b 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,7 +37,7 @@ obj-$(CONFIG_CLK_RCAR_CPG_LIB) += rcar-cpg-lib.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
-obj-$(CONFIG_CLK_RZG2L) += renesas-rzg2l-cpg.o
+obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o
# Generic
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 4a43ebec7d5e..39b185d8e957 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -210,7 +210,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("rpc-if", 917, R8A774A1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6),
- DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP),
+ DEF_MOD("iic-pmic", 926, R8A774A1_CLK_CP),
DEF_MOD("i2c4", 927, R8A774A1_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A774A1_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A774A1_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index 6f04c40fe237..af602d83c8ce 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -206,7 +206,7 @@ static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = {
DEF_MOD("rpc-if", 917, R8A774B1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6),
- DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP),
+ DEF_MOD("iic-pmic", 926, R8A774B1_CLK_CP),
DEF_MOD("i2c4", 927, R8A774B1_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A774B1_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A774B1_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index ed3a2cf0e0bb..5b938eb2df25 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -210,7 +210,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("rpc-if", 917, R8A774C0_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2),
- DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP),
+ DEF_MOD("iic-pmic", 926, R8A774C0_CLK_CP),
DEF_MOD("i2c4", 927, R8A774C0_CLK_S3D2),
DEF_MOD("i2c3", 928, R8A774C0_CLK_S3D2),
DEF_MOD("i2c2", 929, R8A774C0_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
index b96c486abb44..40c71466df37 100644
--- a/drivers/clk/renesas/r8a774e1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
@@ -219,7 +219,7 @@ static const struct mssr_mod_clk r8a774e1_mod_clks[] __initconst = {
DEF_MOD("i2c6", 918, R8A774E1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774E1_CLK_S0D6),
DEF_MOD("adg", 922, R8A774E1_CLK_S0D1),
- DEF_MOD("i2c-dvfs", 926, R8A774E1_CLK_CP),
+ DEF_MOD("iic-pmic", 926, R8A774E1_CLK_CP),
DEF_MOD("i2c4", 927, R8A774E1_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A774E1_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A774E1_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index acaf5a93f1d3..f16d125ca009 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -135,7 +135,6 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("zt", R8A779A0_CLK_ZT, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED("ztr", R8A779A0_CLK_ZTR, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED("zr", R8A779A0_CLK_ZR, CLK_PLL1_DIV2, 1, 1),
- DEF_FIXED("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 1, 1),
DEF_FIXED("cnndsp", R8A779A0_CLK_CNNDSP, CLK_PLL5_DIV4, 1, 1),
DEF_FIXED("vip", R8A779A0_CLK_VIP, CLK_PLL5, 5, 1),
DEF_FIXED("adgh", R8A779A0_CLK_ADGH, CLK_PLL5_DIV4, 1, 1),
@@ -151,6 +150,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
+ DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884),
DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
@@ -167,6 +167,9 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
DEF_MOD("csi43", 402, R8A779A0_CLK_CSI0),
+ DEF_MOD("du", 411, R8A779A0_CLK_S3D1),
+ DEF_MOD("dsi0", 415, R8A779A0_CLK_DSI),
+ DEF_MOD("dsi1", 416, R8A779A0_CLK_DSI),
DEF_MOD("fcpvd0", 508, R8A779A0_CLK_S3D1),
DEF_MOD("fcpvd1", 509, R8A779A0_CLK_S3D1),
DEF_MOD("hscif0", 514, R8A779A0_CLK_S1D2),
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index ae24e0397d3c..4c94b94c4125 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -12,11 +12,11 @@
#include <dt-bindings/clock/r9a07g044-cpg.h>
-#include "renesas-rzg2l-cpg.h"
+#include "rzg2l-cpg.h"
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A07G044_OSCCLK,
+ LAST_DT_CORE_CLK = R9A07G044_CLK_P0_DIV2,
/* External Input Clocks */
CLK_EXTAL,
@@ -37,6 +37,7 @@ enum clk_ids {
CLK_PLL5,
CLK_PLL5_DIV2,
CLK_PLL6,
+ CLK_P1_DIV2,
/* Module Clocks */
MOD_CLK_BASE,
@@ -76,9 +77,11 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
};
@@ -90,6 +93,42 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x518, 0),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
0x518, 1),
+ DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
+ 0x52c, 0),
+ DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
+ 0x52c, 1),
+ DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
+ 0x570, 0),
+ DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
+ 0x570, 1),
+ DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
+ 0x570, 2),
+ DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
+ 0x570, 3),
+ DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
+ 0x570, 4),
+ DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
+ 0x570, 5),
+ DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
+ 0x570, 6),
+ DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
+ 0x570, 7),
+ DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
+ 0x578, 0),
+ DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
+ 0x578, 1),
+ DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
+ 0x578, 2),
+ DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
+ 0x578, 3),
+ DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
+ 0x580, 0),
+ DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
+ 0x580, 1),
+ DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
+ 0x580, 2),
+ DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
+ 0x580, 3),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
0x584, 0),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
@@ -102,18 +141,47 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x584, 4),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
0x588, 0),
+ DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
+ 0x594, 0),
+ DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
+ 0x598, 0),
+ DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
+ 0x5a8, 0),
+ DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
+ 0x5a8, 1),
};
static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0),
+ DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A07G044_SSI3_RST_M2_REG, 0x870, 3),
+ DEF_RST(R9A07G044_USB_U2H0_HRESETN, 0x878, 0),
+ DEF_RST(R9A07G044_USB_U2H1_HRESETN, 0x878, 1),
+ DEF_RST(R9A07G044_USB_U2P_EXL_SYSRST, 0x878, 2),
+ DEF_RST(R9A07G044_USB_PRESETN, 0x878, 3),
+ DEF_RST(R9A07G044_I2C0_MRST, 0x880, 0),
+ DEF_RST(R9A07G044_I2C1_MRST, 0x880, 1),
+ DEF_RST(R9A07G044_I2C2_MRST, 0x880, 2),
+ DEF_RST(R9A07G044_I2C3_MRST, 0x880, 3),
DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0),
DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1),
DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2),
DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
+ DEF_RST(R9A07G044_CANFD_RSTP_N, 0x894, 0),
+ DEF_RST(R9A07G044_CANFD_RSTC_N, 0x894, 1),
+ DEF_RST(R9A07G044_GPIO_RSTN, 0x898, 0),
+ DEF_RST(R9A07G044_GPIO_PORT_RESETN, 0x898, 1),
+ DEF_RST(R9A07G044_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A07G044_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A07G044_ADC_ADRST_N, 0x8a8, 1),
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index e7c59af2a1d8..3b3b2c3347f3 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -29,7 +29,7 @@
#include <dt-bindings/clock/renesas-cpg-mssr.h>
-#include "renesas-rzg2l-cpg.h"
+#include "rzg2l-cpg.h"
#ifdef DEBUG
#define WARN_DEBUG(x) WARN_ON(x)
@@ -125,7 +125,7 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
core->flag, &priv->rmw_lock);
if (IS_ERR(clk_hw))
- return NULL;
+ return ERR_CAST(clk_hw);
return clk_hw->clk;
}
@@ -175,17 +175,14 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct clk_init_data init;
const char *parent_name;
struct pll_clk *pll_clk;
- struct clk *clk;
parent = clks[core->parent & 0xffff];
if (IS_ERR(parent))
return ERR_CAST(parent);
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
- if (!pll_clk) {
- clk = ERR_PTR(-ENOMEM);
- return NULL;
- }
+ if (!pll_clk)
+ return ERR_PTR(-ENOMEM);
parent_name = __clk_get_name(parent);
init.name = core->name;
@@ -200,11 +197,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->priv = priv;
pll_clk->type = core->type;
- clk = clk_register(NULL, &pll_clk->hw);
- if (IS_ERR(clk))
- kfree(pll_clk);
-
- return clk;
+ return clk_register(NULL, &pll_clk->hw);
}
static struct clk
@@ -229,7 +222,7 @@ static struct clk
case CPG_MOD:
type = "module";
- if (clkidx > priv->num_mod_clks) {
+ if (clkidx >= priv->num_mod_clks) {
dev_err(dev, "Invalid %s clock index %u\n", type,
clkidx);
return ERR_PTR(-EINVAL);
@@ -297,7 +290,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
break;
default:
goto fail;
- };
+ }
if (IS_ERR_OR_NULL(clk))
goto fail;
@@ -473,7 +466,6 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
fail:
dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
mod->name, PTR_ERR(clk));
- kfree(clock);
}
#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 63695280ce8b..63695280ce8b 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index fe937bcdb487..f7827b3b7fc1 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -940,7 +940,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
switch (pll_type) {
case pll_rk3036:
case pll_rk3328:
- if (!pll->rate_table || IS_ERR(ctx->grf))
+ if (!pll->rate_table)
init.ops = &rockchip_rk3036_pll_clk_norate_ops;
else
init.ops = &rockchip_rk3036_pll_clk_ops;
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 614845cc5b4a..d644bc155ec6 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -121,6 +121,7 @@ PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
+PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -340,7 +341,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(16), 8, 2, MFLAGS, 10, 5, DFLAGS,
RK2928_CLKGATE_CON(10), 4, GFLAGS),
- COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_dmyapll_dpll_gpll_xin24_p, 0,
RK2928_CLKSEL_CON(16), 0, 2, MFLAGS, 2, 5, DFLAGS,
RK2928_CLKGATE_CON(10), 5, GFLAGS),
@@ -403,7 +404,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(HCLK_OTG1, "hclk_otg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S, "hclk_i2s", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
- GATE(0, "hclk_sfc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
GATE(HCLK_MAC, "hclk_mac", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
/* pclk_peri gates */
diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c
index 2c3bd0c749f2..db3396c3e6e9 100644
--- a/drivers/clk/rockchip/clk-rk3308.c
+++ b/drivers/clk/rockchip/clk-rk3308.c
@@ -911,6 +911,7 @@ static const char *const rk3308_critical_clocks[] __initconst = {
"hclk_audio",
"pclk_audio",
"sclk_ddrc",
+ "clk_ddrphy4x",
};
static void __init rk3308_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 049e5e0b64f6..b7be7e11b0df 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -22,6 +22,8 @@
#include <linux/regmap.h>
#include <linux/reboot.h>
#include <linux/rational.h>
+
+#include "../clk-fractional-divider.h"
#include "clk.h"
/*
@@ -178,10 +180,8 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate,
unsigned long *m, unsigned long *n)
{
- struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long p_rate, p_parent_rate;
struct clk_hw *p_parent;
- unsigned long scale;
p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
@@ -190,18 +190,7 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
*parent_rate = p_parent_rate;
}
- /*
- * Get rate closer to *parent_rate to guarantee there is no overflow
- * for m and n. In the result it will be the nearest rate left shifted
- * by (scale - fd->nwidth) bits.
- */
- scale = fls_long(*parent_rate / rate - 1);
- if (scale > fd->nwidth)
- rate <<= scale - fd->nwidth;
-
- rational_best_approximation(rate, *parent_rate,
- GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
- m, n);
+ clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}
static struct clk *rockchip_clk_register_frac_branch(
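The rockchip-specific approximation now keeps only its parent-switching heuristic and delegates the overflow-safe m/n search to the shared helper, which is why ../clk-fractional-divider.h is newly included. Reconstructed from the lines removed above, the shared helper presumably performs the following (not the authoritative implementation):

	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long scale;

	/* Shift rate toward *parent_rate so m and n cannot overflow;
	 * the result is the nearest rate left-shifted by (scale - fd->nwidth). */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);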
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index 1cb21ea79c64..242e94c0cf8a 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -107,10 +107,10 @@ static const struct clk_parent_data gpio_db_free_mux[] = {
};
static const struct clk_parent_data psi_ref_free_mux[] = {
- { .fw_name = "main_pll_c3",
- .name = "main_pll_c3", },
- { .fw_name = "peri_pll_c3",
- .name = "peri_pll_c3", },
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
{ .fw_name = "osc1",
.name = "osc1", },
{ .fw_name = "cb-intosc-hs-div2-clk",
@@ -195,6 +195,13 @@ static const struct clk_parent_data sdmmc_mux[] = {
.name = "boot_clk", },
};
+static const struct clk_parent_data s2f_user0_mux[] = {
+ { .fw_name = "s2f_user0_free_clk",
+ .name = "s2f_user0_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
static const struct clk_parent_data s2f_user1_mux[] = {
{ .fw_name = "s2f_user1_free_clk",
.name = "s2f_user1_free_clk", },
@@ -273,7 +280,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
{ AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
{ AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
- ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
+ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0x30, 2},
{ AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
{ AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
@@ -319,6 +326,8 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
4, 0x98, 0, 16, 0x88, 3, 0},
{ AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
5, 0, 0, 0, 0x88, 4, 4},
+ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_user0_mux, ARRAY_SIZE(s2f_user0_mux), 0, 0x24,
+ 6, 0, 0, 0, 0x30, 2, 0},
{ AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
6, 0, 0, 0, 0x88, 5, 0},
{ AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index a5f526bb0483..6144447f86c6 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1377,7 +1377,7 @@ static void dfll_debug_init(struct tegra_dfll *td)
}
#else
-static void inline dfll_debug_init(struct tegra_dfll *td) { }
+static inline void dfll_debug_init(struct tegra_dfll *td) { }
#endif /* CONFIG_DEBUG_FS */
/*
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 292d6269daf1..4dcf7f7cb8a0 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -777,11 +777,7 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
- /*
- * Critical for RAM re-repair operation, which must occur on resume
- * from LP1 system suspend and as part of CCPLEX cluster switching.
- */
- GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
+ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 0aedd42fad52..528c5bb397cc 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -99,10 +99,11 @@ static void u8500_clk_init(struct device_node *np)
if (fw_version != NULL) {
switch (fw_version->project) {
case PRCMU_FW_PROJECT_U8500_C2:
- case PRCMU_FW_PROJECT_U8500_MBL:
+ case PRCMU_FW_PROJECT_U8500_SSG1:
case PRCMU_FW_PROJECT_U8520:
case PRCMU_FW_PROJECT_U8420:
case PRCMU_FW_PROJECT_U8420_SYSCLK:
+ case PRCMU_FW_PROJECT_U8500_SSG2:
sgaclk_parent = "soc0_pll";
break;
default:
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 18564efdc651..1244c4e568ff 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-fch.o
-clk-x86-lpss-objs := clk-lpt.o
+clk-x86-lpss-y := clk-lpss-atom.o
obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpss-atom.c
index fbe9fd3ed948..aa9d0bb98f8b 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpss-atom.c
@@ -13,7 +13,7 @@
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_device.h>
-static int lpt_clk_probe(struct platform_device *pdev)
+static int lpss_atom_clk_probe(struct platform_device *pdev)
{
struct lpss_clk_data *drvdata;
struct clk *clk;
@@ -34,14 +34,14 @@ static int lpt_clk_probe(struct platform_device *pdev)
return 0;
}
-static struct platform_driver lpt_clk_driver = {
+static struct platform_driver lpss_atom_clk_driver = {
.driver = {
- .name = "clk-lpt",
+ .name = "clk-lpss-atom",
},
- .probe = lpt_clk_probe,
+ .probe = lpss_atom_clk_probe,
};
-int __init lpt_clk_init(void)
+int __init lpss_atom_clk_init(void)
{
- return platform_driver_register(&lpt_clk_driver);
+ return platform_driver_register(&lpss_atom_clk_driver);
}
diff --git a/drivers/clk/zynqmp/clk-gate-zynqmp.c b/drivers/clk/zynqmp/clk-gate-zynqmp.c
index 695feaa82da5..565ed67a0430 100644
--- a/drivers/clk/zynqmp/clk-gate-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-gate-zynqmp.c
@@ -12,7 +12,7 @@
#include "clk-zynqmp.h"
/**
- * struct clk_gate - gating clock
+ * struct zynqmp_clk_gate - gating clock
* @hw: handle between common and hardware-specific interfaces
* @flags: hardware-specific flags
* @clk_id: Id of clock
@@ -66,7 +66,7 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
}
/**
- * zynqmp_clk_gate_is_enable() - Check clock state
+ * zynqmp_clk_gate_is_enabled() - Check clock state
* @hw: handle between common and hardware-specific interfaces
*
 * Return: 1 if enabled, 0 if disabled, otherwise an error code
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 157d4a960bf4..17afce594f28 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -159,7 +159,7 @@ struct clk_hw *zynqmp_clk_register_mux(const char *name, u32 clk_id,
hw = &mux->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
- kfree(hw);
+ kfree(mux);
hw = ERR_PTR(ret);
}
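The one-liner above fixes a subtle allocation-pairing bug: hw points at a clk_hw embedded inside the kzalloc'd zynqmp_clk_mux, so kfree(hw) only works by accident when the member happens to sit at offset zero. Freeing the container is correct regardless of layout; schematically (field layout abridged for illustration):

	struct zynqmp_clk_mux {
		struct clk_hw	hw;	/* embedded, never allocated on its own */
		u32		clk_id;
	};

	/* hw == &mux->hw, so on clk_hw_register() failure: */
	kfree(container_of(hw, struct zynqmp_clk_mux, hw));	/* == kfree(mux) */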
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index 84fa80a969a9..60cbc0674a9e 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -56,6 +56,7 @@ enum topology_type {
* @type: Type of topology
* @flag: Topology flags
* @type_flag: Topology type specific flag
+ * @custom_type_flag: Topology type specific custom flag
*/
struct clock_topology {
u32 type;
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 871184e406e1..eb25303eefed 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -762,9 +762,7 @@ static int zynqmp_clk_setup(struct device_node *np)
zynqmp_register_clocks(np);
zynqmp_data->num = clock_max_idx;
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, zynqmp_data);
-
- return 0;
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, zynqmp_data);
}
static int zynqmp_clock_probe(struct platform_device *pdev)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index eb661b539a3e..0f5e3983951a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -234,8 +234,9 @@ config CLKSRC_LPC32XX
Support for the LPC32XX clocksource.
config CLKSRC_PISTACHIO
- bool "Clocksource for Pistachio SoC" if COMPILE_TEST
+ bool "Clocksource for Pistachio SoC"
depends on HAS_IOMEM
+ depends on MIPS || COMPILE_TEST
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..954749afb5fe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -133,6 +133,18 @@ config ARM_MEDIATEK_CPUFREQ
help
This adds the CPUFreq driver support for MediaTek SoCs.
+config ARM_MEDIATEK_CPUFREQ_HW
+ tristate "MediaTek CPUFreq HW driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default m
+ help
+ Support for the CPUFreq HW driver.
+ Some MediaTek chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+	  Say Y or M if you want to support CPUFreq HW.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 27d3bd7ea9d4..48ee5859030c 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ_HW) += mediatek-cpufreq-hw.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b49612895c78..28467d83c745 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -889,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = !acpi_pstate_strict &&
!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
return result;
err_unreg:
@@ -918,16 +921,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
- policy->cpu);
- unsigned int freq = policy->freq_table[0].frequency;
-
- if (perf->states[0].core_frequency * 1000 != freq)
- pr_warn(FW_WARN "P-state 0 is not max freq\n");
-}
-
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -955,7 +948,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
- .ready = acpi_cpufreq_cpu_ready,
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 231e585f6ba2..ca1d103ec449 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -137,11 +137,15 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
+ { .compatible = "qcom,sm8250", },
+ { .compatible = "qcom,sm8350", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ece52863ba62..8fcaba541539 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -143,8 +143,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_clk_put:
@@ -184,6 +182,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.online = cpufreq_online,
.offline = cpufreq_offline,
+ .register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 06c526d66dd3..5782b15a8caa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,19 @@ static int cpufreq_online(unsigned int cpu)
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ /*
+ * Register with the energy model before
+ * sched_cpufreq_governor_change() is called, which will result
+	 * in rebuilding of the sched domains; that should happen only
+	 * after the energy model has been properly initialized for the
+	 * policy.
+	 *
+	 * Also, this should be called before the policy is registered
+	 * with the cooling framework.
+ */
+ if (cpufreq_driver->register_em)
+ cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy);
@@ -1504,10 +1517,6 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
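The new ->register_em hook replaces per-driver dev_pm_opp_of_register_em() calls (see the cpufreq-dt and imx6q hunks): the core now invokes it at one well-ordered point, before the sched domains are rebuilt and before cooling registration. For OPP-based drivers, the generic helper is expected to be a thin wrapper along these lines (paraphrased from include/linux/cpufreq.h; verify against the tree):

	static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
	{
		dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
					  policy->cpus);
	}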
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 5bf5fc759881..90beb26ed34e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -192,7 +192,6 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
}
@@ -204,6 +203,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b4ffe6c8a0d0..1097f826ad70 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,7 +32,6 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
-#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
@@ -220,7 +219,6 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
- * @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -259,7 +257,6 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
- struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
@@ -271,6 +268,7 @@ static struct cpudata **all_cpu_data;
* @get_min: Callback to get minimum P state
* @get_turbo: Callback to get turbo P state
* @get_scaling: Callback to get frequency scaling factor
+ * @get_cpu_scaling:	Callback to get the frequency scaling factor for a given CPU
* @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
@@ -284,6 +282,7 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_cpu_scaling)(int cpu);
int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
@@ -387,6 +386,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
return cppc_perf.nominal_perf;
}
+static u32 intel_pstate_cppc_nominal(int cpu)
+{
+ u64 nominal_perf;
+
+ if (cppc_get_nominal_perf(cpu, &nominal_perf))
+ return 0;
+
+ return nominal_perf;
+}
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
@@ -473,20 +481,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
-
-static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
-{
- return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
-}
-
-static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
- struct cppc_perf_caps *caps)
-{
- if (cppc_get_perf_caps(cpu->cpu, caps))
- return false;
-
- return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
-}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -509,15 +503,8 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
-{
- pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
-
- cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
-}
-
/**
- * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
*
* On hybrid processors, HWP may expose more performance levels than there are
@@ -525,115 +512,46 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
* scaling factor between HWP performance levels and CPU frequency will be less
* than the scaling factor between P-state values and CPU frequency.
*
- * In that case, the scaling factor between HWP performance levels and CPU
- * frequency needs to be determined which can be done with the help of the
- * observation that certain HWP performance levels should correspond to certain
- * P-states, like for example the HWP highest performance should correspond
- * to the maximum turbo P-state of the CPU.
+ * In that case, adjust the CPU parameters used in computations accordingly.
*/
-static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
+static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo();
int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
- int perf_ctl_max = pstate_funcs.get_max();
- int max_freq = perf_ctl_max * perf_ctl_scaling;
- int scaling = INT_MAX;
- int freq;
+ int scaling = cpu->pstate.scaling;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
+ pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
-
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
-
-#ifdef CONFIG_ACPI
- if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
- struct cppc_perf_caps caps;
-
- if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
- if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
-
- /*
- * If the CPPC nominal performance is valid, it
- * can be assumed to correspond to cpu_khz.
- */
- if (caps.nominal_perf == perf_ctl_max_phys) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
- } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
-
- /*
- * If the CPPC guaranteed performance is valid,
- * it can be assumed to correspond to max_freq.
- */
- if (caps.guaranteed_perf == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
- }
- }
- }
-#endif
- /*
- * If using the CPPC data to compute the HWP-to-frequency scaling factor
- * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
- * the assumption that it corresponds to max_freq.
- */
- if (scaling > perf_ctl_scaling) {
- pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
-
- if (cpu->pstate.max_pstate == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
- if (scaling > perf_ctl_scaling) {
- /*
- * This should not happen, because it would mean that
- * the number of HWP perf levels was less than the
- * number of P-states, so use the PERF_CTL scaling in
- * that case.
- */
- pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
- scaling);
-
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- }
+ pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
/*
- * If the product of the HWP performance scaling factor obtained above
- * and the HWP_CAP highest performance is greater than the maximum turbo
- * frequency corresponding to the pstate_funcs.get_turbo() return value,
- * the scaling factor is too high, so recompute it so that the HWP_CAP
- * highest performance corresponds to the maximum turbo frequency.
+ * If the product of the HWP performance scaling factor and the HWP_CAP
+ * highest performance is greater than the maximum turbo frequency
+ * corresponding to the pstate_funcs.get_turbo() return value, the
+ * scaling factor is too high, so recompute it to make the HWP_CAP
+ * highest performance correspond to the maximum turbo frequency.
*/
if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
- pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
-
cpu->pstate.turbo_freq = turbo_freq;
scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
- }
-
- cpu->pstate.scaling = scaling;
+ cpu->pstate.scaling = scaling;
- pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
+ cpu->cpu, scaling);
+ }
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- freq = perf_ctl_max_phys * perf_ctl_scaling;
- cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
+ cpu->pstate.max_pstate_physical =
+ DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+ scaling);
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
/*
@@ -1628,40 +1546,6 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
/************************** sysfs end ************************/
-static void intel_pstate_notify_work(struct work_struct *work)
-{
- mutex_lock(&intel_pstate_driver_lock);
- cpufreq_update_policy(smp_processor_id());
- wrmsrl(MSR_HWP_STATUS, 0);
- mutex_unlock(&intel_pstate_driver_lock);
-}
-
-void notify_hwp_interrupt(void)
-{
- unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
- u64 value;
-
- if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- return;
-
- rdmsrl(MSR_HWP_STATUS, value);
- if (!(value & 0x01))
- return;
-
- cpudata = all_cpu_data[this_cpu];
- schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
-}
-
-static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
-{
- /* Enable HWP notification interrupt for guaranteed performance change */
- if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
- }
-}
-
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
 	/* First disable HWP notification interrupts as we don't process them */
@@ -1671,8 +1555,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
-
- intel_pstate_enable_hwp_interrupt(cpudata);
}
static int atom_get_min_pstate(void)
@@ -1900,6 +1782,38 @@ static int knl_get_turbo_pstate(void)
return ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u32 hybrid_ref_perf;
+
+static int hybrid_get_cpu_scaling(int cpu)
+{
+ return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
+ intel_pstate_cppc_nominal(cpu));
+}
+
+static void intel_pstate_cppc_set_cpu_scaling(void)
+{
+ u32 min_nominal_perf = U32_MAX;
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
+
+ if (nominal_perf && nominal_perf < min_nominal_perf)
+ min_nominal_perf = nominal_perf;
+ }
+
+ if (min_nominal_perf < U32_MAX) {
+ hybrid_ref_perf = min_nominal_perf;
+ pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+ }
+}
+#else
+static inline void intel_pstate_cppc_set_cpu_scaling(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
+
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1928,10 +1842,8 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
int perf_ctl_max_phys = pstate_funcs.get_max_physical();
- int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
- pstate_funcs.get_scaling();
+ int perf_ctl_scaling = pstate_funcs.get_scaling();
cpu->pstate.min_pstate = pstate_funcs.get_min();
cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
@@ -1940,10 +1852,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (hwp_active && !hwp_mode_bdw) {
__intel_pstate_get_hwp_cap(cpu);
- if (hybrid_cpu)
- intel_pstate_hybrid_hwp_calibrate(cpu);
- else
+ if (pstate_funcs.get_cpu_scaling) {
+ cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
+ if (cpu->pstate.scaling != perf_ctl_scaling)
+ intel_pstate_hybrid_hwp_adjust(cpu);
+ } else {
cpu->pstate.scaling = perf_ctl_scaling;
+ }
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max();
@@ -3315,6 +3230,9 @@ static int __init intel_pstate_init(void)
if (!default_driver)
default_driver = &intel_pstate;
+ if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
+ intel_pstate_cppc_set_cpu_scaling();
+
goto hwp_cpu_matched;
}
} else {
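
The hybrid scaling callback added above scales the PERF_CTL scaling factor by the ratio of the reference (lowest) CPPC nominal performance to the CPU's own CPPC nominal performance. A standalone sketch of that arithmetic, with made-up nominal-performance values (78 for the reference core, 120 for a faster core type) and core_get_scaling() assumed to return 100000 kHz per perf unit:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Made-up inputs, not taken from any real platform. */
	static const unsigned int core_scaling = 100000;  /* kHz per perf unit */
	static const unsigned int ref_nominal_perf = 78;  /* lowest CPPC nominal perf */

	/* Same shape as hybrid_get_cpu_scaling() in the patch above. */
	static unsigned int hybrid_scaling(unsigned int cppc_nominal_perf)
	{
		return DIV_ROUND_UP(core_scaling * ref_nominal_perf,
				    cppc_nominal_perf);
	}

	int main(void)
	{
		printf("reference core: %u\n", hybrid_scaling(78));  /* 100000 */
		printf("faster core:    %u\n", hybrid_scaling(120)); /* 65000 */
		return 0;
	}
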
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
new file mode 100644
index 000000000000..0cf18dd46b92
--- /dev/null
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/energy_model.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#define LUT_MAX_ENTRIES 32U
+#define LUT_FREQ GENMASK(11, 0)
+#define LUT_ROW_SIZE 0x4
+#define CPUFREQ_HW_STATUS BIT(0)
+#define SVS_HW_STATUS BIT(1)
+#define POLL_USEC 1000
+#define TIMEOUT_USEC 300000
+
+enum {
+ REG_FREQ_LUT_TABLE,
+ REG_FREQ_ENABLE,
+ REG_FREQ_PERF_STATE,
+ REG_FREQ_HW_STATE,
+ REG_EM_POWER_TBL,
+ REG_FREQ_LATENCY,
+
+ REG_ARRAY_SIZE,
+};
+
+struct mtk_cpufreq_data {
+ struct cpufreq_frequency_table *table;
+ void __iomem *reg_bases[REG_ARRAY_SIZE];
+ int nr_opp;
+};
+
+static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+};
+
+static int __maybe_unused
+mtk_cpufreq_get_cpu_power(unsigned long *mW,
+ unsigned long *KHz, struct device *cpu_dev)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ int i;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ for (i = 0; i < data->nr_opp; i++) {
+ if (data->table[i].frequency < *KHz)
+ break;
+ }
+ i--;
+
+ *KHz = data->table[i].frequency;
+ *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
+ i * LUT_ROW_SIZE) / 1000;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return 0;
+}
+
+static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return data->table[index].frequency;
+}
+
+static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+ unsigned int index;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int mtk_cpu_create_freq_table(struct platform_device *pdev,
+ struct mtk_cpufreq_data *data)
+{
+ struct device *dev = &pdev->dev;
+ u32 temp, i, freq, prev_freq = 0;
+ void __iomem *base_table;
+
+ data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+ sizeof(*data->table), GFP_KERNEL);
+ if (!data->table)
+ return -ENOMEM;
+
+ base_table = data->reg_bases[REG_FREQ_LUT_TABLE];
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE));
+ freq = FIELD_GET(LUT_FREQ, temp) * 1000;
+
+ if (freq == prev_freq)
+ break;
+
+ data->table[i].frequency = freq;
+
+ dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
+
+ prev_freq = freq;
+ }
+
+ data->table[i].frequency = CPUFREQ_TABLE_END;
+ data->nr_opp = i;
+
+ return 0;
+}
+
+static int mtk_cpu_resources_init(struct platform_device *pdev,
+ struct cpufreq_policy *policy,
+ const u16 *offsets)
+{
+ struct mtk_cpufreq_data *data;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret, i;
+ int index;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus);
+ if (index < 0)
+ return index;
+
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
+ data->reg_bases[i] = base + offsets[i];
+
+ ret = mtk_cpu_create_freq_table(pdev, data);
+ if (ret) {
+ dev_info(dev, "Domain-%d failed to create freq table\n", index);
+ return ret;
+ }
+
+ policy->freq_table = data->table;
+ policy->driver_data = data;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
+ struct mtk_cpufreq_data *data;
+ unsigned int latency;
+ int ret;
+
+	/* Get the register bases of the cpufreq domains */
+ ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev));
+ if (ret) {
+ dev_info(&pdev->dev, "CPUFreq resource init failed\n");
+ return ret;
+ }
+
+ data = policy->driver_data;
+
+ latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+ policy->fast_switch_possible = true;
+
+ /* HW should be in enabled state to proceed now */
+ writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]);
+ if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig,
+ (sig & pwr_hw) == pwr_hw, POLL_USEC,
+ TIMEOUT_USEC)) {
+ if (!(sig & CPUFREQ_HW_STATUS)) {
+ pr_info("cpufreq hardware of CPU%d is not enabled\n",
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
+ }
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ /* HW should be in paused state now */
+ writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+
+ return 0;
+}
+
+static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
+ &em_cb, policy->cpus, true);
+}
+
+static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_hw_target_index,
+ .get = mtk_cpufreq_hw_get,
+ .init = mtk_cpufreq_hw_cpu_init,
+ .exit = mtk_cpufreq_hw_cpu_exit,
+ .register_em = mtk_cpufreq_register_em,
+ .fast_switch = mtk_cpufreq_hw_fast_switch,
+ .name = "mtk-cpufreq-hw",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ const void *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, (void *) data);
+ cpufreq_mtk_hw_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+
+ return ret;
+}
+
+static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+}
+
+static const struct of_device_id mtk_cpufreq_hw_match[] = {
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ {}
+};
+
+static struct platform_driver mtk_cpufreq_hw_driver = {
+ .probe = mtk_cpufreq_hw_driver_probe,
+ .remove = mtk_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "mtk-cpufreq-hw",
+ .of_match_table = mtk_cpufreq_hw_match,
+ },
+};
+module_platform_driver(mtk_cpufreq_hw_driver);
+
+MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek cpufreq-hw driver");
+MODULE_LICENSE("GPL v2");
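
Each row that mtk_cpu_create_freq_table() reads packs the frequency into the low 12 bits (LUT_FREQ is GENMASK(11, 0)); the driver multiplies the field by 1000, which suggests the hardware reports MHz while cpufreq consumes kHz, and a repeated value terminates the table. A freestanding restatement of the row decode, with a made-up register word:

	#include <stdint.h>
	#include <stdio.h>

	#define LUT_FREQ_MASK	0x0fffU	/* GENMASK(11, 0), as in the driver */

	/* Mirrors FIELD_GET(LUT_FREQ, temp) * 1000 from the driver above. */
	static uint32_t lut_row_to_khz(uint32_t row)
	{
		return (row & LUT_FREQ_MASK) * 1000;
	}

	int main(void)
	{
		uint32_t row = 0x000007d0;	/* made-up raw value: 2000 MHz */

		printf("%u kHz\n", lut_row_to_khz(row));	/* 2000000 */
		return 0;
	}
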
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 87019d5a9547..866163883b48 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -448,8 +448,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
- dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
-
return 0;
}
@@ -471,6 +469,7 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e035ee216b0f..1b50df06c6bc 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -131,7 +131,6 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
- dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
return 0;
}
@@ -150,6 +149,7 @@ static struct cpufreq_driver omap_driver = {
.get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "omap",
.attr = cpufreq_generic_attr,
};
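
This and the following conversions drop the open-coded dev_pm_opp_of_register_em() call from ->init() and instead set the driver's ->register_em callback to cpufreq_register_em_with_opp, letting the cpufreq core register the energy model once the policy is fully initialized. Conceptually the helper reduces to the sketch below; this is a hedged restatement of its expected shape, not a copy of the core's implementation:

	/* Hypothetical restatement of cpufreq_register_em_with_opp(). */
	static void register_em_with_opp_sketch(struct cpufreq_policy *policy)
	{
		struct device *cpu_dev = get_cpu_device(policy->cpu);

		if (!cpu_dev)
			return;

		/* Build the energy model from the device's OPP table. */
		dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
	}
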
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 23a06cba392c..5a2cf5f91ccb 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -36,6 +36,7 @@
#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
+#define MAX_NR_CHIPS 32
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -1046,12 +1047,20 @@ static int init_chip_info(void)
unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
+ cpumask_t *chip_cpu_mask;
int ret = 0;
chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
+	/* Allocate an array of cpumasks, large enough to hold a mask per chip */
+ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
+ if (!chip_cpu_mask) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
+
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
@@ -1059,22 +1068,25 @@ static int init_chip_info(void)
prev_chip_id = id;
chip[nr_chips++] = id;
}
+ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
if (!chips) {
ret = -ENOMEM;
- goto free_and_return;
+ goto out_free_chip_cpu_mask;
}
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
for_each_cpu(cpu, &chips[i].mask)
per_cpu(chip_info, cpu) = &chips[i];
}
+out_free_chip_cpu_mask:
+ kfree(chip_cpu_mask);
free_and_return:
kfree(chip);
return ret;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f86859bf76f1..a2be0df7e174 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -7,12 +7,14 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -22,10 +24,13 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
+#define HZ_PER_KHZ 1000
+
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_freq_lut;
u32 reg_volt_lut;
+ u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
};
@@ -34,6 +39,16 @@ struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
+
+ /*
+ * Mutex to synchronize between de-init sequence and re-starting LMh
+ * polling/interrupts
+ */
+ struct mutex throttle_lock;
+ int throttle_irq;
+ bool cancel_throttle;
+ struct delayed_work throttle_work;
+ struct cpufreq_policy *policy;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -251,10 +266,92 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
+static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+
+ return (val & 0x3FF) * 19200;
+}
+
+static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+{
+ unsigned long max_capacity, capacity, freq_hz, throttled_freq;
+ struct cpufreq_policy *policy = data->policy;
+ int cpu = cpumask_first(policy->cpus);
+ struct device *dev = get_cpu_device(cpu);
+ struct dev_pm_opp *opp;
+ unsigned int freq;
+
+ /*
+ * Get the h/w throttled frequency, normalize it using the
+ * registered opp table and use it to calculate thermal pressure.
+ */
+ freq = qcom_lmh_get_throttle_freq(data);
+ freq_hz = freq * HZ_PER_KHZ;
+
+ opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ /* Update thermal pressure */
+
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
+
+ /* Don't pass boost capacity to scheduler */
+ if (capacity > max_capacity)
+ capacity = max_capacity;
+
+ arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
+
+ /*
+	 * In the unlikely case that the policy is unregistered, do not
+	 * enable polling or the h/w interrupt.
+ */
+ mutex_lock(&data->throttle_lock);
+ if (data->cancel_throttle)
+ goto out;
+
+ /*
+ * If h/w throttled frequency is higher than what cpufreq has requested
+ * for, then stop polling and switch back to interrupt mechanism.
+ */
+ if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
+ enable_irq(data->throttle_irq);
+ else
+ mod_delayed_work(system_highpri_wq, &data->throttle_work,
+ msecs_to_jiffies(10));
+
+out:
+ mutex_unlock(&data->throttle_lock);
+}
+
+static void qcom_lmh_dcvs_poll(struct work_struct *work)
+{
+ struct qcom_cpufreq_data *data;
+
+ data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
+ qcom_lmh_dcvs_notify(data);
+}
+
+static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
+{
+ struct qcom_cpufreq_data *c_data = data;
+
+ /* Disable interrupt and enable polling */
+ disable_irq_nosync(c_data->throttle_irq);
+ qcom_lmh_dcvs_notify(c_data);
+
+	return IRQ_HANDLED;
+}
+
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
.reg_enable = 0x0,
.reg_freq_lut = 0x110,
.reg_volt_lut = 0x114,
+ .reg_current_vote = 0x704,
.reg_perf_state = 0x920,
.lut_row_size = 32,
};
@@ -274,6 +371,51 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ char irq_name[15];
+ int ret;
+
+ /*
+	 * Look for the LMh interrupt. If no interrupt line is specified,
+	 * or if there is an error, allow cpufreq to be enabled as usual.
+ */
+ data->throttle_irq = platform_get_irq(pdev, index);
+ if (data->throttle_irq <= 0)
+ return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
+
+ data->cancel_throttle = false;
+ data->policy = policy;
+
+ mutex_init(&data->throttle_lock);
+ INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+
+ snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
+ ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
+ IRQF_ONESHOT, irq_name, data);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
+ if (data->throttle_irq <= 0)
+ return;
+
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = true;
+ mutex_unlock(&data->throttle_lock);
+
+ cancel_delayed_work_sync(&data->throttle_work);
+ free_irq(data->throttle_irq, data);
+}
+
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
@@ -348,6 +490,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}
policy->driver_data = data;
+ policy->dvfs_possible_from_any_cpu = true;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -362,14 +505,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support();
if (ret)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
+ ret = qcom_cpufreq_hw_lmh_init(policy, index);
+ if (ret)
+ goto error;
+
return 0;
error:
kfree(data);
@@ -389,6 +534,7 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
iounmap(base);
@@ -412,6 +558,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
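
qcom_lmh_get_throttle_freq() above masks the low ten bits of the LMh "current vote" register and scales them by 19200, i.e. the vote appears to be expressed in units of the 19.2 MHz XO clock, and the result is in kHz. A standalone check of that decode with a made-up register value:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors qcom_lmh_get_throttle_freq(): low 10 bits, 19.2 MHz units. */
	static unsigned int lmh_vote_to_khz(uint32_t reg)
	{
		return (reg & 0x3ff) * 19200;	/* kHz */
	}

	int main(void)
	{
		/* Made-up value: a vote of 75 decodes to 1440000 kHz. */
		printf("%u kHz\n", lmh_vote_to_khz(0x4b));
		return 0;
	}
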
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 75f818d04b48..1e0cd4d165f0 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -22,7 +22,9 @@
struct scmi_data {
int domain_id;
+ int nr_opp;
struct device *cpu_dev;
+ cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
@@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
- struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
- cpumask_var_t opp_shared_cpus;
- bool power_scale_mw;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* The OPP 'sharing cpus' info may come from DT through an empty opp
* table and opp-shared.
*/
- ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
- if (ret || !cpumask_weight(opp_shared_cpus)) {
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
- cpumask_copy(opp_shared_cpus, policy->cpus);
+ cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/*
@@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
@@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- power_scale_mw = perf_ops->power_scale_mw_get(ph);
- em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
- opp_shared_cpus, power_scale_mw);
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_free_opp;
+ priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
- free_cpumask_var(opp_shared_cpus);
return 0;
-out_free_priv:
- kfree(priv);
-
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
- free_cpumask_var(opp_shared_cpus);
+ free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+ kfree(priv);
return ret;
}
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ struct scmi_data *priv = policy->driver_data;
+
+ /*
+ * This callback will be called for each policy, but we don't need to
+ * register with EM every time. Despite not being part of the same
+ * policy, some CPUs may still share their perf-domains, and a CPU from
+ * another policy may already have registered with EM on behalf of CPUs
+ * of this policy.
+ */
+ if (!priv->nr_opp)
+ return;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+ &em_cb, priv->opp_shared_cpus,
+ power_scale_mw);
+}
+
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
+ .register_em = scmi_cpufreq_register_em,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index d6a698a1b5d1..bda3e7d42964 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -163,8 +163,6 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = false;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_free_cpufreq_table:
@@ -200,6 +198,7 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
.target_index = scpi_cpufreq_set_target,
+ .register_em = cpufreq_register_em_with_opp,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 1a251e635ebd..b8704232c27b 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -145,16 +145,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void sh_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct device *dev = get_cpu_device(policy->cpu);
-
- dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
- "Maximum %u.%03u MHz.\n",
- policy->min / 1000, policy->min % 1000,
- policy->max / 1000, policy->max % 1000);
-}
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -163,7 +153,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .ready = sh_cpufreq_cpu_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 51dfa9ae6cf5..284b6bd040b1 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -47,7 +46,6 @@ static bool bL_switching_enabled;
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -442,8 +440,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
@@ -457,11 +453,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
struct device *cpu_dev;
int cur_cluster = cpu_to_cluster(policy->cpu);
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
-
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
@@ -473,17 +464,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -493,7 +473,7 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
- .ready = ve_spc_cpufreq_ready,
+ .register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
@@ -553,6 +533,9 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
+ if (!is_bL_switching_enabled())
+ ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
+
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index a2b5c6f60cf0..7e7ab5597d7a 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -346,11 +346,9 @@ static int pseries_cpuidle_driver_init(void)
static void __init fixup_cede0_latency(void)
{
struct xcede_latency_payload *payload;
- u64 min_latency_us;
+ u64 min_xcede_latency_us = UINT_MAX;
int i;
- min_latency_us = dedicated_states[1].exit_latency; // CEDE latency
-
if (parse_cede_parameters())
return;
@@ -358,42 +356,45 @@ static void __init fixup_cede0_latency(void)
nr_xcede_records);
payload = &xcede_latency_parameter.payload;
+
+ /*
+ * The CEDE idle state maps to CEDE(0). While the hypervisor
+ * does not advertise CEDE(0) exit latency values, it does
+ * advertise the latency values of the extended CEDE states.
+ * We use the lowest advertised exit latency value as a proxy
+ * for the exit latency of CEDE(0).
+ */
for (i = 0; i < nr_xcede_records; i++) {
struct xcede_latency_record *record = &payload->records[i];
+ u8 hint = record->hint;
u64 latency_tb = be64_to_cpu(record->latency_ticks);
u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC);
- if (latency_us == 0)
- pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i);
-
- if (latency_us < min_latency_us)
- min_latency_us = latency_us;
- }
-
- /*
- * By default, we assume that CEDE(0) has exit latency 10us,
- * since there is no way for us to query from the platform.
- *
- * However, if the wakeup latency of an Extended CEDE state is
- * smaller than 10us, then we can be sure that CEDE(0)
- * requires no more than that.
- *
- * Perform the fix-up.
- */
- if (min_latency_us < dedicated_states[1].exit_latency) {
/*
- * We set a minimum of 1us wakeup latency for cede0 to
- * distinguish it from snooze
+		 * We expect the exit latency of an extended CEDE
+		 * state to be non-zero, since it takes at least a
+		 * few nanoseconds to wake up the idle CPU and
+		 * dispatch the virtual processor into the Linux
+		 * guest.
+		 *
+		 * So we consider only non-zero values when
+		 * performing the fixup of the CEDE(0) latency.
*/
- u64 cede0_latency = 1;
+ if (latency_us == 0) {
+ pr_warn("cpuidle: Skipping xcede record %d [hint=%d]. Exit latency = 0us\n",
+ i, hint);
+ continue;
+ }
- if (min_latency_us > cede0_latency)
- cede0_latency = min_latency_us - 1;
+ if (latency_us < min_xcede_latency_us)
+ min_xcede_latency_us = latency_us;
+ }
- dedicated_states[1].exit_latency = cede0_latency;
- dedicated_states[1].target_residency = 10 * (cede0_latency);
+ if (min_xcede_latency_us != UINT_MAX) {
+ dedicated_states[1].exit_latency = min_xcede_latency_us;
+ dedicated_states[1].target_residency = 10 * (min_xcede_latency_us);
pr_info("cpuidle: Fixed up CEDE exit latency to %llu us\n",
- cede0_latency);
+ min_xcede_latency_us);
}
}
@@ -402,7 +403,7 @@ static void __init fixup_cede0_latency(void)
* pseries_idle_probe()
* Choose state table for shared versus dedicated partition
*/
-static int pseries_idle_probe(void)
+static int __init pseries_idle_probe(void)
{
if (cpuidle_disable != IDLE_NO_OVERRIDE)
@@ -419,7 +420,21 @@ static int pseries_idle_probe(void)
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {
- fixup_cede0_latency();
+ /*
+ * Use firmware provided latency values
+ * starting with POWER10 platforms. In the
+ * case that we are running on a POWER10
+ * platform but in an earlier compat mode, we
+ * can still use the firmware provided values.
+ *
+ * However, on platforms prior to POWER10, we
+ * cannot rely on the accuracy of the firmware
+ * provided latency values. On such platforms,
+ * go with the conservative default estimate
+ * of 10us.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31) || pvr_version_is(PVR_POWER10))
+ fixup_cede0_latency();
cpuidle_state_table = dedicated_states;
max_idle_state = NR_DEDICATED_STATES;
}
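
With the fixup above, CEDE(0) inherits the smallest non-zero advertised extended-CEDE exit latency, and its target residency is set to ten times that value. For instance, records advertising 12us, 3us and 0us (the last skipped as unrealistic) leave exit_latency at 3us and target_residency at 30us. A standalone restatement of the selection loop with those made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Made-up advertised exit latencies in us; 0 entries skipped. */
		const uint64_t records_us[] = { 12, 3, 0 };
		uint64_t min_us = UINT32_MAX;

		for (unsigned int i = 0; i < 3; i++) {
			if (records_us[i] == 0)
				continue;	/* unrealistic, as in the patch */
			if (records_us[i] < min_us)
				min_us = records_us[i];
		}

		if (min_us != UINT32_MAX)
			printf("exit_latency=%llu us, target_residency=%llu us\n",
			       (unsigned long long)min_us,
			       (unsigned long long)(10 * min_us));
		return 0;
	}
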
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index a2d34be17a09..f7d778580e9b 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -117,7 +117,7 @@ static int dbx500_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver dbx500_cpuidle_plat_driver = {
.driver = {
- .name = "cpuidle-dbx500",
+ .name = "db8500-cpuidle",
},
.probe = dbx500_cpuidle_probe,
};
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 96bc7b5c6532..6c61817996a3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -306,9 +306,7 @@ static int nitrox_device_flr(struct pci_dev *pdev)
return -ENOMEM;
}
- /* check flr support */
- if (pcie_has_flr(pdev))
- pcie_flr(pdev);
+ pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
pci_restore_state(pdev);
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index 32954059b37b..d1aaabc940f3 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,11 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_BUS) += core/
obj-$(CONFIG_CXL_MEM) += cxl_pci.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_core-y := core.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 8ae89273f58e..54e9d4d2cf5f 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
struct acpi_device *adev = to_acpi_device(dev);
+ if (!acpi_pci_find_root(adev->handle))
+ return NULL;
+
if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
return adev;
return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (!bridge)
return 0;
- pci_root = acpi_pci_find_root(bridge->handle);
- if (!pci_root)
- return -ENXIO;
-
dport = find_dport_by_dev(root_port, match);
if (!dport) {
dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
+ /*
+ * Note that this lookup already succeeded in
+ * to_cxl_host_bridge(), so no need to check for failure here
+ */
+ pci_root = acpi_pci_find_root(bridge->handle);
ctx = (struct cxl_walk_context){
.dev = host,
.root = pci_root->bus,
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
new file mode 100644
index 000000000000..0fdbf3c6ac1a
--- /dev/null
+++ b/drivers/cxl/core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_core.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+cxl_core-y := bus.o
+cxl_core-y += pmem.o
+cxl_core-y += regs.o
+cxl_core-y += memdev.o
diff --git a/drivers/cxl/core.c b/drivers/cxl/core/bus.c
index 2b90b7c3b9d7..267d8042bec2 100644
--- a/drivers/cxl/core.c
+++ b/drivers/cxl/core/bus.c
@@ -6,14 +6,22 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include "cxl.h"
-#include "mem.h"
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
/**
* DOC: cxl core
*
- * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
- * point for cross-device interleave coordination through cxl ports.
+ * The CXL core provides a set of interfaces that can be consumed by CXL aware
+ * drivers. The interfaces allow for creation, modification, and destruction of
+ * regions, memory devices, ports, and decoders. CXL aware drivers must register
+ * with the CXL core via these interfaces in order to be able to participate in
+ * cross-device interleave coordination. The CXL core also establishes and
+ * maintains the bridge to the nvdimm subsystem.
+ *
+ * The CXL core introduces a sysfs hierarchy to control the devices that
+ * are instantiated by the core.
*/
static DEFINE_IDA(cxl_port_ida);
@@ -30,7 +38,7 @@ static struct attribute *cxl_base_attributes[] = {
NULL,
};
-static struct attribute_group cxl_base_attribute_group = {
+struct attribute_group cxl_base_attribute_group = {
.attrs = cxl_base_attributes,
};
@@ -507,11 +515,6 @@ err:
return ERR_PTR(rc);
}
-static void unregister_dev(void *dev)
-{
- device_unregister(dev);
-}
-
struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t base, resource_size_t len,
@@ -536,7 +539,7 @@ devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
if (rc)
goto err;
- rc = devm_add_action_or_reset(host, unregister_dev, dev);
+ rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
if (rc)
return ERR_PTR(rc);
return cxld;
@@ -548,429 +551,6 @@ err:
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
/**
- * cxl_probe_component_regs() - Detect CXL Component register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping containing the HDM Decoder Capability Header
- * @map: Map object describing the register block information found
- *
- * See CXL 2.0 8.2.4 Component Register Layout and Definition
- * See CXL 2.0 8.2.5.5 CXL Device Register Interface
- *
- * Probe for component register information and return it in map object.
- */
-void cxl_probe_component_regs(struct device *dev, void __iomem *base,
- struct cxl_component_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_component_reg_map) { 0 };
-
- /*
- * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
- * CXL 2.0 8.2.4 Table 141.
- */
- base += CXL_CM_OFFSET;
-
- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
-
- if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
- dev_err(dev,
-			"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
- return;
- }
-
- /* It's assumed that future versions will be backward compatible */
- cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- void __iomem *register_block;
- u32 hdr;
- int decoder_cnt;
- u16 cap_id, offset;
- u32 length;
-
- hdr = readl(base + cap * 0x4);
-
- cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
- offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
- register_block = base + offset;
-
- switch (cap_id) {
- case CXL_CM_CAP_CAP_ID_HDM:
- dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
- offset);
-
- hdr = readl(register_block);
-
- decoder_cnt = cxl_hdm_decoder_count(hdr);
- length = 0x20 * decoder_cnt + 0x10;
-
- map->hdm_decoder.valid = true;
- map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
- map->hdm_decoder.size = length;
- break;
- default:
- dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
- offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
-
-static void cxl_nvdimm_bridge_release(struct device *dev)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
-
- kfree(cxl_nvb);
-}
-
-static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_bridge_type = {
- .name = "cxl_nvdimm_bridge",
- .release = cxl_nvdimm_bridge_release,
- .groups = cxl_nvdimm_bridge_attribute_groups,
-};
-
-struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
- "not a cxl_nvdimm_bridge device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm_bridge, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
-
-static struct cxl_nvdimm_bridge *
-cxl_nvdimm_bridge_alloc(struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
-
- cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
- if (!cxl_nvb)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvb->dev;
- cxl_nvb->port = port;
- cxl_nvb->state = CXL_NVB_NEW;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &port->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_bridge_type;
-
- return cxl_nvb;
-}
-
-static void unregister_nvb(void *_cxl_nvb)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
- bool flush;
-
- /*
- * If the bridge was ever activated then there might be in-flight state
- * work to flush. Once the state has been changed to 'dead' then no new
- * work can be queued by user-triggered bind.
- */
- device_lock(&cxl_nvb->dev);
- flush = cxl_nvb->state != CXL_NVB_NEW;
- cxl_nvb->state = CXL_NVB_DEAD;
- device_unlock(&cxl_nvb->dev);
-
- /*
- * Even though the device core will trigger device_release_driver()
- * before the unregister, it does not know about the fact that
- * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
-	 * release now and flush it before tearing down the nvdimm device
- * hierarchy.
- */
- device_release_driver(&cxl_nvb->dev);
- if (flush)
- flush_work(&cxl_nvb->state_work);
- device_unregister(&cxl_nvb->dev);
-}
-
-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
- struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
- int rc;
-
- if (!IS_ENABLED(CONFIG_CXL_PMEM))
- return ERR_PTR(-ENXIO);
-
- cxl_nvb = cxl_nvdimm_bridge_alloc(port);
- if (IS_ERR(cxl_nvb))
- return cxl_nvb;
-
- dev = &cxl_nvb->dev;
- rc = dev_set_name(dev, "nvdimm-bridge");
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
- if (rc)
- return ERR_PTR(rc);
-
- return cxl_nvb;
-
-err:
- put_device(dev);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
-
-static void cxl_nvdimm_release(struct device *dev)
-{
- struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
-
- kfree(cxl_nvd);
-}
-
-static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_type = {
- .name = "cxl_nvdimm",
- .release = cxl_nvdimm_release,
- .groups = cxl_nvdimm_attribute_groups,
-};
-
-bool is_cxl_nvdimm(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_type;
-}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
-
-struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
- "not a cxl_nvdimm device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
-
-static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
-
- cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
- if (!cxl_nvd)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvd->dev;
- cxl_nvd->cxlmd = cxlmd;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &cxlmd->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_type;
-
- return cxl_nvd;
-}
-
-int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
- int rc;
-
- cxl_nvd = cxl_nvdimm_alloc(cxlmd);
- if (IS_ERR(cxl_nvd))
- return PTR_ERR(cxl_nvd);
-
- dev = &cxl_nvd->dev;
- rc = dev_set_name(dev, "pmem%d", cxlmd->id);
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
- dev_name(dev));
-
- return devm_add_action_or_reset(host, unregister_dev, dev);
-
-err:
- put_device(dev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
-
-/**
- * cxl_probe_device_regs() - Detect CXL Device register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
- * @map: Map object describing the register block information found
- *
- * Probe for device register information and return it in map object.
- */
-void cxl_probe_device_regs(struct device *dev, void __iomem *base,
- struct cxl_device_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_device_reg_map){ 0 };
-
- cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
- if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
- CXLDEV_CAP_ARRAY_CAP_ID)
- return;
-
- cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- u32 offset, length;
- u16 cap_id;
-
- cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
- readl(base + cap * 0x10));
- offset = readl(base + cap * 0x10 + 0x4);
- length = readl(base + cap * 0x10 + 0x8);
-
- switch (cap_id) {
- case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
- dev_dbg(dev, "found Status capability (0x%x)\n", offset);
-
- map->status.valid = true;
- map->status.offset = offset;
- map->status.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
- dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
- map->mbox.valid = true;
- map->mbox.offset = offset;
- map->mbox.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
- dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
- break;
- case CXLDEV_CAP_CAP_ID_MEMDEV:
- dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
- map->memdev.valid = true;
- map->memdev.offset = offset;
- map->memdev.size = length;
- break;
- default:
- if (cap_id >= 0x8000)
- dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
- else
- dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
-
-static void __iomem *devm_cxl_iomap_block(struct device *dev,
- resource_size_t addr,
- resource_size_t length)
-{
- void __iomem *ret_val;
- struct resource *res;
-
- res = devm_request_mem_region(dev, addr, length, dev_name(dev));
- if (!res) {
- resource_size_t end = addr + length - 1;
-
- dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
- return NULL;
- }
-
- ret_val = devm_ioremap(dev, addr, length);
- if (!ret_val)
- dev_err(dev, "Failed to map region %pr\n", res);
-
- return ret_val;
-}
-
-int cxl_map_component_regs(struct pci_dev *pdev,
- struct cxl_component_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
- resource_size_t length;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- phys_addr += map->component_map.hdm_decoder.offset;
- length = map->component_map.hdm_decoder.size;
- regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
- if (!regs->hdm_decoder)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
-
-int cxl_map_device_regs(struct pci_dev *pdev,
- struct cxl_device_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- if (map->device_map.status.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.status.offset;
- length = map->device_map.status.size;
- regs->status = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->status)
- return -ENOMEM;
- }
-
- if (map->device_map.mbox.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.mbox.offset;
- length = map->device_map.mbox.size;
- regs->mbox = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->mbox)
- return -ENOMEM;
- }
-
- if (map->device_map.memdev.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.memdev.offset;
- length = map->device_map.memdev.size;
- regs->memdev = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->memdev)
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
-
-/**
* __cxl_driver_register - register a driver for the cxl bus
* @cxl_drv: cxl driver structure to attach
* @owner: owning module/driver
@@ -1053,12 +633,26 @@ EXPORT_SYMBOL_GPL(cxl_bus_type);
static __init int cxl_core_init(void)
{
- return bus_register(&cxl_bus_type);
+ int rc;
+
+ rc = cxl_memdev_init();
+ if (rc)
+ return rc;
+
+ rc = bus_register(&cxl_bus_type);
+ if (rc)
+ goto err;
+ return 0;
+
+err:
+ cxl_memdev_exit();
+ return rc;
}
static void cxl_core_exit(void)
{
bus_unregister(&cxl_bus_type);
+ cxl_memdev_exit();
}
module_init(cxl_core_init);
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
new file mode 100644
index 000000000000..036a3c8106b4
--- /dev/null
+++ b/drivers/cxl/core/core.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_CORE_H__
+#define __CXL_CORE_H__
+
+extern const struct device_type cxl_nvdimm_bridge_type;
+extern const struct device_type cxl_nvdimm_type;
+
+extern struct attribute_group cxl_base_attribute_group;
+
+static inline void unregister_cxl_dev(void *dev)
+{
+ device_unregister(dev);
+}
+
+int cxl_memdev_init(void);
+void cxl_memdev_exit(void);
+
+#endif /* __CXL_CORE_H__ */
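
The unregister_cxl_dev() helper lets object registration be tied to a host device's lifetime: after device_add() succeeds, the caller arms a devm action that will device_unregister() the child when the host driver unbinds. A condensed sketch of the pattern as used by devm_cxl_add_decoder() and devm_cxl_add_nvdimm(); the wrapper name here is hypothetical, and the device is assumed to be initialized and named already:

	/* Sketch of the devm registration pattern used by the CXL core. */
	static int cxl_register_child(struct device *host, struct device *dev)
	{
		int rc;

		rc = device_add(dev);
		if (rc) {
			put_device(dev);	/* drop the init reference */
			return rc;
		}

		/* Unregister automatically when the host device goes away. */
		return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
	}
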
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
new file mode 100644
index 000000000000..a9c317e32010
--- /dev/null
+++ b/drivers/cxl/core/memdev.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+#include "core.h"
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+
+static void cxl_memdev_release(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+ kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid)
+{
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t label_storage_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+}
+static DEVICE_ATTR_RO(label_storage_size);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->ram_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+ __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->pmem_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+ __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_payload_max.attr,
+ &dev_attr_label_storage_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+ &dev_attr_pmem_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+ &dev_attr_ram_size.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+ .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+ .name = "ram",
+ .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+ .name = "pmem",
+ .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+ &cxl_memdev_attribute_group,
+ &cxl_memdev_ram_attribute_group,
+ &cxl_memdev_pmem_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+ .name = "cxl_memdev",
+ .release = cxl_memdev_release,
+ .devnode = cxl_memdev_devnode,
+ .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxl_memdev_unregister(void *_cxlmd)
+{
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+ struct cdev *cdev = &cxlmd->cdev;
+ const struct cdevm_file_operations *cdevm_fops;
+
+ cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
+ cdevm_fops->shutdown(dev);
+
+ cdev_device_del(&cxlmd->cdev, dev);
+ put_device(dev);
+}
+
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+ const struct file_operations *fops)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+ if (!cxlmd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
+ if (rc < 0)
+ goto err;
+ cxlmd->id = rc;
+
+ dev = &cxlmd->dev;
+ device_initialize(dev);
+ dev->parent = &pdev->dev;
+ dev->bus = &cxl_bus_type;
+ dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+ dev->type = &cxl_memdev_type;
+ device_set_pm_not_required(dev);
+
+ cdev = &cxlmd->cdev;
+ cdev_init(cdev, fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+ if (IS_ERR(cxlmd))
+ return cxlmd;
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+ * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+ * needed as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+
+ cdev = &cxlmd->cdev;
+ rc = cdev_device_add(cdev, dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
+
+err:
+ /*
+ * The cdev was briefly live, shutdown any ioctl operations that
+ * saw that state.
+ */
+ cdevm_fops->shutdown(dev);
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
+
+__init int cxl_memdev_init(void)
+{
+ dev_t devt;
+ int rc;
+
+ rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+ if (rc)
+ return rc;
+
+ cxl_mem_major = MAJOR(devt);
+
+ return 0;
+}
+
+void cxl_memdev_exit(void)
+{
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
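
cxl_memdev_init() reserves CXL_MEM_MAX_DEVS minors under one dynamically allocated major, and cxl_memdev_devnode() prefixes node names with "cxl/", so a memdev with id 3 gets MKDEV(cxl_mem_major, 3) and surfaces as /dev/cxl/mem3. A standalone illustration of the dev_t packing, using the kernel's 20-bit minor layout and a made-up major number:

	#include <stdio.h>

	/* Kernel dev_t layout: 12-bit major above a 20-bit minor. */
	#define MKDEV(ma, mi)	(((ma) << 20) | (mi))
	#define MAJOR(dev)	((unsigned int)((dev) >> 20))
	#define MINOR(dev)	((unsigned int)((dev) & 0xfffff))

	int main(void)
	{
		unsigned int devt = MKDEV(511, 3);	/* made-up major; id 3 */

		printf("major=%u minor=%u -> /dev/cxl/mem%u\n",
		       MAJOR(devt), MINOR(devt), MINOR(devt));
		return 0;
	}
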
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
new file mode 100644
index 000000000000..d24570f5b8ba
--- /dev/null
+++ b/drivers/cxl/core/pmem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl pmem
+ *
+ * The core CXL PMEM infrastructure supports persistent memory
+ * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
+ * 'bridge' device is added at the root of a CXL device topology if
+ * platform firmware advertises at least one persistent memory capable
+ * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
+ * device. Then, for each cxl_memdev in the CXL device topology, a
+ * bridge device is added to host a LIBNVDIMM dimm object. When these
+ * bridges are registered, native LIBNVDIMM uapis are translated to CXL
+ * operations, for example, namespace label access commands.
+ */
+
+static void cxl_nvdimm_bridge_release(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ kfree(cxl_nvb);
+}
+
+static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_bridge_type = {
+ .name = "cxl_nvdimm_bridge",
+ .release = cxl_nvdimm_bridge_release,
+ .groups = cxl_nvdimm_bridge_attribute_groups,
+};
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
+ "not a cxl_nvdimm_bridge device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm_bridge, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+
+static struct cxl_nvdimm_bridge *
+cxl_nvdimm_bridge_alloc(struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+
+ cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
+ if (!cxl_nvb)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvb->dev;
+ cxl_nvb->port = port;
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &port->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_bridge_type;
+
+ return cxl_nvb;
+}
+
+static void unregister_nvb(void *_cxl_nvb)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
+ bool flush;
+
+ /*
+ * If the bridge was ever activated then there might be in-flight state
+	 * work to flush. Once the state has been changed to 'dead', no new
+ * work can be queued by user-triggered bind.
+ */
+ device_lock(&cxl_nvb->dev);
+ flush = cxl_nvb->state != CXL_NVB_NEW;
+ cxl_nvb->state = CXL_NVB_DEAD;
+ device_unlock(&cxl_nvb->dev);
+
+ /*
+ * Even though the device core will trigger device_release_driver()
+ * before the unregister, it does not know about the fact that
+	 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
+	 * release now and flush it before tearing down the nvdimm device
+ * hierarchy.
+ */
+ device_release_driver(&cxl_nvb->dev);
+ if (flush)
+ flush_work(&cxl_nvb->state_work);
+ device_unregister(&cxl_nvb->dev);
+}
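
The same mark-dead-then-flush ordering, reduced to a userspace analogy in
which a worker thread stands in for the deferred ->remove() work; this is a
sketch of the locking protocol, not the kernel mechanism:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bridge {
	pthread_mutex_t lock;
	bool dead;
	bool worker_started;
	pthread_t worker;
};

static void *state_work(void *arg)
{
	(void)arg;
	puts("flushing in-flight state work");
	return NULL;
}

static void unregister_bridge(struct bridge *b)
{
	bool flush;

	pthread_mutex_lock(&b->lock);
	flush = b->worker_started;	/* was the bridge ever activated? */
	b->dead = true;			/* no new work may be queued now */
	pthread_mutex_unlock(&b->lock);

	if (flush)
		pthread_join(b->worker, NULL);	/* flush in-flight work */
	puts("bridge unregistered");
}

int main(void)
{
	struct bridge b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	b.worker_started = true;
	pthread_create(&b.worker, NULL, state_work, &b);
	unregister_bridge(&b);
	return 0;
}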
+
+/**
+ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+ * Return: bridge device that can host cxl_nvdimm objects
+ */
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_PMEM))
+ return ERR_PTR(-ENXIO);
+
+ cxl_nvb = cxl_nvdimm_bridge_alloc(port);
+ if (IS_ERR(cxl_nvb))
+ return cxl_nvb;
+
+ dev = &cxl_nvb->dev;
+ rc = dev_set_name(dev, "nvdimm-bridge");
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return cxl_nvb;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+
+static void cxl_nvdimm_release(struct device *dev)
+{
+ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+
+ kfree(cxl_nvd);
+}
+
+static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_type = {
+ .name = "cxl_nvdimm",
+ .release = cxl_nvdimm_release,
+ .groups = cxl_nvdimm_attribute_groups,
+};
+
+bool is_cxl_nvdimm(struct device *dev)
+{
+ return dev->type == &cxl_nvdimm_type;
+}
+EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
+ "not a cxl_nvdimm device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+
+static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+
+ cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
+ if (!cxl_nvd)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvd->dev;
+ cxl_nvd->cxlmd = cxlmd;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlmd->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_type;
+
+ return cxl_nvd;
+}
+
+/**
+ * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
+ * @host: same host as @cxlmd
+ * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
+ *
+ * Return: 0 on success negative error code on failure.
+ */
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+ int rc;
+
+ cxl_nvd = cxl_nvdimm_alloc(cxlmd);
+ if (IS_ERR(cxl_nvd))
+ return PTR_ERR(cxl_nvd);
+
+ dev = &cxl_nvd->dev;
+ rc = dev_set_name(dev, "pmem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
new file mode 100644
index 000000000000..41de4a136ecd
--- /dev/null
+++ b/drivers/cxl/core/regs.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+
+/**
+ * DOC: cxl registers
+ *
+ * CXL device capabilities are enumerated by PCI DVSEC (Designated
+ * Vendor-Specific Extended Capability) structures and / or descriptors
+ * provided by platform firmware.
+ * They can be defined as a set like the device and component registers
+ * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
+ * Extended Capabilities, or they can be individual capabilities
+ * appended to bridged and endpoint devices.
+ *
+ * Provide common infrastructure for enumerating and mapping these
+ * discrete capabilities.
+ */
+
+/**
+ * cxl_probe_component_regs() - Detect CXL Component register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping containing the HDM Decoder Capability Header
+ * @map: Map object describing the register block information found
+ *
+ * See CXL 2.0 8.2.4 Component Register Layout and Definition
+ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
+ *
+ * Probe for component register information and return it in the map object.
+ */
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+ struct cxl_component_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_component_reg_map) { 0 };
+
+ /*
+ * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
+ * CXL 2.0 8.2.4 Table 141.
+ */
+ base += CXL_CM_OFFSET;
+
+ cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
+
+ if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
+ dev_err(dev,
+ "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
+ return;
+ }
+
+ /* It's assumed that future versions will be backward compatible */
+ cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 hdr;
+ int decoder_cnt;
+ u16 cap_id, offset;
+ u32 length;
+
+ hdr = readl(base + cap * 0x4);
+
+ cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
+ offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
+ register_block = base + offset;
+
+ switch (cap_id) {
+ case CXL_CM_CAP_CAP_ID_HDM:
+ dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
+ offset);
+
+ hdr = readl(register_block);
+
+ decoder_cnt = cxl_hdm_decoder_count(hdr);
+ length = 0x20 * decoder_cnt + 0x10;
+
+ map->hdm_decoder.valid = true;
+ map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
+ map->hdm_decoder.size = length;
+ break;
+ default:
+ dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
+ offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
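
The HDM register-block sizing above is a fixed 0x10-byte capability header
plus 0x20 bytes of registers per decoder; a quick standalone check of that
arithmetic:

#include <stdio.h>

int main(void)
{
	/* 0x10-byte header + 0x20 bytes per decoder */
	for (int decoders = 1; decoders <= 8; decoders *= 2)
		printf("%d decoders -> block size 0x%x\n",
		       decoders, 0x20 * decoders + 0x10);
	return 0;
}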
+
+/**
+ * cxl_probe_device_regs() - Detect CXL Device register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
+ * @map: Map object describing the register block information found
+ *
+ * Probe for device register information and return it in the map object.
+ */
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+ struct cxl_device_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_device_reg_map){ 0 };
+
+ cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ u32 offset, length;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(base + cap * 0x10));
+ offset = readl(base + cap * 0x10 + 0x4);
+ length = readl(base + cap * 0x10 + 0x8);
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+
+ map->status.valid = true;
+ map->status.offset = offset;
+ map->status.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ map->mbox.valid = true;
+ map->mbox.offset = offset;
+ map->mbox.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ map->memdev.valid = true;
+ map->memdev.offset = offset;
+ map->memdev.size = length;
+ break;
+ default:
+ if (cap_id >= 0x8000)
+ dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
+ else
+ dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+
+static void __iomem *devm_cxl_iomap_block(struct device *dev,
+ resource_size_t addr,
+ resource_size_t length)
+{
+ void __iomem *ret_val;
+ struct resource *res;
+
+ res = devm_request_mem_region(dev, addr, length, dev_name(dev));
+ if (!res) {
+ resource_size_t end = addr + length - 1;
+
+ dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+ return NULL;
+ }
+
+ ret_val = devm_ioremap(dev, addr, length);
+ if (!ret_val)
+ dev_err(dev, "Failed to map region %pr\n", res);
+
+ return ret_val;
+}
+
+int cxl_map_component_regs(struct pci_dev *pdev,
+ struct cxl_component_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+ resource_size_t length;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ phys_addr += map->component_map.hdm_decoder.offset;
+ length = map->component_map.hdm_decoder.size;
+ regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
+ if (!regs->hdm_decoder)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+
+int cxl_map_device_regs(struct pci_dev *pdev,
+ struct cxl_device_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ if (map->device_map.status.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.status.offset;
+ length = map->device_map.status.size;
+ regs->status = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->status)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.mbox.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.mbox.offset;
+ length = map->device_map.mbox.size;
+ regs->mbox = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->mbox)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.memdev.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.memdev.offset;
+ length = map->device_map.memdev.size;
+ regs->memdev = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->memdev)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_device_regs);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6bda39a59e3..53927f9fa77e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -140,7 +140,6 @@ struct cxl_device_reg_map {
};
struct cxl_register_map {
- struct list_head list;
u64 block_offset;
u8 reg_type;
u8 barno;
diff --git a/drivers/cxl/mem.h b/drivers/cxl/cxlmem.h
index 8f02d02b26b4..6c0b1e2ea97c 100644
--- a/drivers/cxl/mem.h
+++ b/drivers/cxl/cxlmem.h
@@ -28,11 +28,20 @@
(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
CXLMDEV_RESET_NEEDED_NOT)
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
+/**
+ * struct cdevm_file_operations - devm coordinated cdev file operations
+ * @fops: file operations that are synchronized against @shutdown
+ * @shutdown: disconnect driver data
+ *
+ * @shutdown is invoked in the devres release path to disconnect any
+ * driver instance data from @dev. It assumes synchronization with any
+ * fops operation that requires driver data. After @shutdown, an
+ * operation may only reference generic device data.
*/
-#define CXL_MEM_MAX_DEVS 65536
+struct cdevm_file_operations {
+ struct file_operations fops;
+ void (*shutdown)(struct device *dev);
+};
/**
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
@@ -48,6 +57,15 @@ struct cxl_memdev {
int id;
};
+static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+ return container_of(dev, struct cxl_memdev, dev);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops);
+
/**
* struct cxl_mem - A CXL memory device
* @pdev: The PCI device associated with this CXL device.
@@ -77,5 +95,14 @@ struct cxl_mem {
struct range pmem_range;
struct range ram_range;
+ u64 total_bytes;
+ u64 volatile_only_bytes;
+ u64 persistent_only_bytes;
+ u64 partition_align_bytes;
+
+ u64 active_volatile_bytes;
+ u64 active_persistent_bytes;
+ u64 next_volatile_bytes;
+ u64 next_persistent_bytes;
};
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4cf351a3cf99..8e45aa07d662 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -12,9 +12,9 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"
-#include "mem.h"
/**
* DOC: cxl pci
@@ -64,6 +64,15 @@ enum opcode {
CXL_MBOX_OP_MAX = 0x10000
};
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
+
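
To make the multiplier concrete, a standalone sketch of the unit conversion;
SZ_256M is spelled out because linux/sizes.h is a kernel-only header, and the
field value is a made-up example, not real device state:

#include <stdint.h>
#include <stdio.h>

#define CAPACITY_MULTIPLIER (256ULL << 20)	/* SZ_256M */

int main(void)
{
	uint64_t volatile_units = 64;	/* example Identify payload value */
	uint64_t bytes = volatile_units * CAPACITY_MULTIPLIER;

	/* 64 units * 256MB = 16 GiB */
	printf("%llu units -> %llu bytes (%llu GiB)\n",
	       (unsigned long long)volatile_units,
	       (unsigned long long)bytes,
	       (unsigned long long)(bytes >> 30));
	return 0;
}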
/**
* struct mbox_cmd - A command to be submitted to hardware.
* @opcode: (input) The command set and command submitted to hardware.
@@ -94,8 +103,6 @@ struct mbox_cmd {
#define CXL_MBOX_SUCCESS 0
};
-static int cxl_mem_major;
-static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -568,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
return false;
- if (security_locked_down(LOCKDOWN_NONE))
+ if (security_locked_down(LOCKDOWN_PCI_ACCESS))
return false;
if (cxl_raw_allow_all)
@@ -806,13 +813,25 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations cxl_memdev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = cxl_memdev_ioctl,
- .open = cxl_memdev_open,
- .release = cxl_memdev_release_file,
- .compat_ioctl = compat_ptr_ioctl,
- .llseek = noop_llseek,
+static void cxl_memdev_shutdown(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ down_write(&cxl_memdev_rwsem);
+ cxlmd->cxlm = NULL;
+ up_write(&cxl_memdev_rwsem);
+}
+
+static const struct cdevm_file_operations cxl_memdev_fops = {
+ .fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+ },
+ .shutdown = cxl_memdev_shutdown,
};
static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
@@ -1022,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
!dev_map->memdev.valid) {
dev_err(dev, "registers not found: %s%s%s\n",
!dev_map->status.valid ? "status " : "",
- !dev_map->mbox.valid ? "status " : "",
- !dev_map->memdev.valid ? "status " : "");
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
return -ENXIO;
}
@@ -1081,9 +1100,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
struct device *dev = &pdev->dev;
u32 regloc_size, regblocks;
void __iomem *base;
- int regloc, i;
- struct cxl_register_map *map, *n;
- LIST_HEAD(register_maps);
+ int regloc, i, n_maps;
+ struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];
int ret = 0;
regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
@@ -1102,20 +1120,12 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
- for (i = 0; i < regblocks; i++, regloc += 8) {
+ for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
u32 reg_lo, reg_hi;
u8 reg_type;
u64 offset;
u8 bar;
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map) {
- ret = -ENOMEM;
- goto free_maps;
- }
-
- list_add(&map->list, &register_maps);
-
pci_read_config_dword(pdev, regloc, &reg_lo);
pci_read_config_dword(pdev, regloc + 4, &reg_hi);
@@ -1125,12 +1135,15 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
bar, offset, reg_type);
+ /* Ignore unknown register block types */
+ if (reg_type > CXL_REGLOC_RBI_MEMDEV)
+ continue;
+
base = cxl_mem_map_regblock(cxlm, bar, offset);
- if (!base) {
- ret = -ENOMEM;
- goto free_maps;
- }
+ if (!base)
+ return -ENOMEM;
+ map = &maps[n_maps];
map->barno = bar;
map->block_offset = offset;
map->reg_type = reg_type;
@@ -1141,240 +1154,22 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
cxl_mem_unmap_regblock(cxlm, base);
if (ret)
- goto free_maps;
+ return ret;
+
+ n_maps++;
}
pci_release_mem_regions(pdev);
- list_for_each_entry(map, &register_maps, list) {
- ret = cxl_map_regs(cxlm, map);
+ for (i = 0; i < n_maps; i++) {
+ ret = cxl_map_regs(cxlm, &maps[i]);
if (ret)
- goto free_maps;
- }
-
-free_maps:
- list_for_each_entry_safe(map, n, &register_maps, list) {
- list_del(&map->list);
- kfree(map);
+ break;
}
return ret;
}
-static struct cxl_memdev *to_cxl_memdev(struct device *dev)
-{
- return container_of(dev, struct cxl_memdev, dev);
-}
-
-static void cxl_memdev_release(struct device *dev)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-
- ida_free(&cxl_memdev_ida, cxlmd->id);
- kfree(cxlmd);
-}
-
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid)
-{
- return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
-}
-static DEVICE_ATTR_RO(firmware_version);
-
-static ssize_t payload_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
-}
-static DEVICE_ATTR_RO(payload_max);
-
-static ssize_t label_storage_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
-}
-static DEVICE_ATTR_RO(label_storage_size);
-
-static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->ram_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_ram_size =
- __ATTR(size, 0444, ram_size_show, NULL);
-
-static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->pmem_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_pmem_size =
- __ATTR(size, 0444, pmem_size_show, NULL);
-
-static struct attribute *cxl_memdev_attributes[] = {
- &dev_attr_firmware_version.attr,
- &dev_attr_payload_max.attr,
- &dev_attr_label_storage_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_pmem_attributes[] = {
- &dev_attr_pmem_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_ram_attributes[] = {
- &dev_attr_ram_size.attr,
- NULL,
-};
-
-static struct attribute_group cxl_memdev_attribute_group = {
- .attrs = cxl_memdev_attributes,
-};
-
-static struct attribute_group cxl_memdev_ram_attribute_group = {
- .name = "ram",
- .attrs = cxl_memdev_ram_attributes,
-};
-
-static struct attribute_group cxl_memdev_pmem_attribute_group = {
- .name = "pmem",
- .attrs = cxl_memdev_pmem_attributes,
-};
-
-static const struct attribute_group *cxl_memdev_attribute_groups[] = {
- &cxl_memdev_attribute_group,
- &cxl_memdev_ram_attribute_group,
- &cxl_memdev_pmem_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_memdev_type = {
- .name = "cxl_memdev",
- .release = cxl_memdev_release,
- .devnode = cxl_memdev_devnode,
- .groups = cxl_memdev_attribute_groups,
-};
-
-static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
-{
- down_write(&cxl_memdev_rwsem);
- cxlmd->cxlm = NULL;
- up_write(&cxl_memdev_rwsem);
-}
-
-static void cxl_memdev_unregister(void *_cxlmd)
-{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- cdev_device_del(&cxlmd->cdev, dev);
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
-}
-
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
-{
- struct pci_dev *pdev = cxlm->pdev;
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
- if (!cxlmd)
- return ERR_PTR(-ENOMEM);
-
- rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
- if (rc < 0)
- goto err;
- cxlmd->id = rc;
-
- dev = &cxlmd->dev;
- device_initialize(dev);
- dev->parent = &pdev->dev;
- dev->bus = &cxl_bus_type;
- dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
- dev->type = &cxl_memdev_type;
- device_set_pm_not_required(dev);
-
- cdev = &cxlmd->cdev;
- cdev_init(cdev, &cxl_memdev_fops);
- return cxlmd;
-
-err:
- kfree(cxlmd);
- return ERR_PTR(rc);
-}
-
-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
- struct cxl_mem *cxlm)
-{
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = cxl_memdev_alloc(cxlm);
- if (IS_ERR(cxlmd))
- return cxlmd;
-
- dev = &cxlmd->dev;
- rc = dev_set_name(dev, "mem%d", cxlmd->id);
- if (rc)
- goto err;
-
- /*
- * Activate ioctl operations, no cxl_memdev_rwsem manipulation
- * needed as this is ordered with cdev_add() publishing the device.
- */
- cxlmd->cxlm = cxlm;
-
- cdev = &cxlmd->cdev;
- rc = cdev_device_add(cdev, dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
- if (rc)
- return ERR_PTR(rc);
- return cxlmd;
-
-err:
- /*
- * The cdev was briefly live, shutdown any ioctl operations that
- * saw that state.
- */
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
- return ERR_PTR(rc);
-}
-
static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
@@ -1469,6 +1264,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
}
/**
+ * cxl_mem_get_partition_info - Get partition info
+ * @cxlm: The device to act on
+ * @active_volatile_bytes: returned active volatile capacity
+ * @active_persistent_bytes: returned active persistent capacity
+ * @next_volatile_bytes: returned next volatile capacity
+ * @next_persistent_bytes: returned next persistent capacity
+ *
+ * Retrieve the current partition info for the device specified. If not 0, the
+ * 'next' values are pending and take effect on the next cold reset.
+ *
+ * Return: 0 on success, or the result of the mailbox command.
+ *
+ * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
+ */
+static int cxl_mem_get_partition_info(struct cxl_mem *cxlm,
+ u64 *active_volatile_bytes,
+ u64 *active_persistent_bytes,
+ u64 *next_volatile_bytes,
+ u64 *next_persistent_bytes)
+{
+ struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+ } __packed pi;
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
+ NULL, 0, &pi, sizeof(pi));
+ if (rc)
+ return rc;
+
+ *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap);
+ *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap);
+ *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap);
+	*next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap);
+
+ *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ return 0;
+}
+
+/**
* cxl_mem_enumerate_cmds() - Enumerate commands for a device.
* @cxlm: The device.
*
@@ -1564,16 +1406,27 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
if (rc < 0)
return rc;
- /*
- * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
- * For now, only the capacity is exported in sysfs
- */
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
+ cxlm->total_bytes = le64_to_cpu(id.total_capacity);
+ cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
+ cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;
- cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end =
- le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+ cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
+ cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
+ cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n"
+ " total_bytes = %#llx\n"
+ " volatile_only_bytes = %#llx\n"
+ " persistent_only_bytes = %#llx\n"
+ " partition_align_bytes = %#llx\n",
+ cxlm->total_bytes,
+ cxlm->volatile_only_bytes,
+ cxlm->persistent_only_bytes,
+ cxlm->partition_align_bytes);
cxlm->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -1581,6 +1434,49 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
return 0;
}
+static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+{
+ int rc;
+
+ if (cxlm->partition_align_bytes == 0) {
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
+ cxlm->pmem_range.start = cxlm->volatile_only_bytes;
+ cxlm->pmem_range.end = cxlm->volatile_only_bytes +
+ cxlm->persistent_only_bytes - 1;
+ return 0;
+ }
+
+ rc = cxl_mem_get_partition_info(cxlm,
+ &cxlm->active_volatile_bytes,
+ &cxlm->active_persistent_bytes,
+ &cxlm->next_volatile_bytes,
+ &cxlm->next_persistent_bytes);
+ if (rc < 0) {
+ dev_err(&cxlm->pdev->dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n"
+ " active_volatile_bytes = %#llx\n"
+ " active_persistent_bytes = %#llx\n"
+ " next_volatile_bytes = %#llx\n"
+ " next_persistent_bytes = %#llx\n",
+ cxlm->active_volatile_bytes,
+ cxlm->active_persistent_bytes,
+ cxlm->next_volatile_bytes,
+ cxlm->next_persistent_bytes);
+
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+
+ cxlm->pmem_range.start = cxlm->active_volatile_bytes;
+ cxlm->pmem_range.end = cxlm->active_volatile_bytes +
+ cxlm->active_persistent_bytes - 1;
+
+ return 0;
+}
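
A worked example of the range math above, assuming a partitioned device
reporting 4GB of active volatile and 12GB of active persistent capacity
(values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vol = 4ULL << 30;	/* active volatile bytes */
	uint64_t pmem = 12ULL << 30;	/* active persistent bytes */

	/* ram = [0x0, 0xffffffff], pmem = [0x100000000, 0x3ffffffff] */
	printf("ram  = [0x0, 0x%llx]\n", (unsigned long long)(vol - 1));
	printf("pmem = [0x%llx, 0x%llx]\n", (unsigned long long)vol,
	       (unsigned long long)(vol + pmem - 1));
	return 0;
}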
+
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_memdev *cxlmd;
@@ -1611,7 +1507,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
+ rc = cxl_mem_create_range_info(cxlm);
+ if (rc)
+ return rc;
+
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
@@ -1640,25 +1540,15 @@ static struct pci_driver cxl_mem_driver = {
static __init int cxl_mem_init(void)
{
struct dentry *mbox_debugfs;
- dev_t devt;
int rc;
/* Double check the anonymous union trickery in struct cxl_regs */
BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
offsetof(struct cxl_regs, device_regs.memdev));
- rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
- if (rc)
- return rc;
-
- cxl_mem_major = MAJOR(devt);
-
rc = pci_register_driver(&cxl_mem_driver);
- if (rc) {
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
- CXL_MEM_MAX_DEVS);
+ if (rc)
return rc;
- }
cxl_debugfs = debugfs_create_dir("cxl", NULL);
mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
@@ -1672,7 +1562,6 @@ static __exit void cxl_mem_exit(void)
{
debugfs_remove_recursive(cxl_debugfs);
pci_unregister_driver(&cxl_mem_driver);
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
index dad7a831f65f..8c1a58813816 100644
--- a/drivers/cxl/pci.h
+++ b/drivers/cxl/pci.h
@@ -25,6 +25,7 @@
#define CXL_REGLOC_RBI_COMPONENT 1
#define CXL_REGLOC_RBI_VIRT 2
#define CXL_REGLOC_RBI_MEMDEV 3
+#define CXL_REGLOC_RBI_TYPES (CXL_REGLOC_RBI_MEMDEV + 1)
#define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
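
The parentheses added to CXL_REGLOC_RBI_TYPES matter because a macro body is
spliced into expressions textually; a two-define illustration with shortened,
non-kernel names:

#include <stdio.h>

#define RBI_MEMDEV	3
#define RBI_TYPES_BAD	RBI_MEMDEV + 1		/* unparenthesized */
#define RBI_TYPES_GOOD	(RBI_MEMDEV + 1)

int main(void)
{
	/* prints "bad: 7, good: 8": 2 * 3 + 1 vs 2 * (3 + 1) */
	printf("bad: %d, good: %d\n", 2 * RBI_TYPES_BAD, 2 * RBI_TYPES_GOOD);
	return 0;
}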
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0088e41dd2f3..9652c3ee41e7 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -6,7 +6,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
-#include "mem.h"
+#include "cxlmem.h"
#include "cxl.h"
/*
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index ac231cc36359..a37622060fff 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -37,15 +37,16 @@ static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
struct dax_kmem_data {
const char *res_name;
+ int mgid;
struct resource *res[];
};
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
+ unsigned long total_len = 0;
struct dax_kmem_data *data;
- int rc = -ENOMEM;
- int i, mapped = 0;
+ int i, rc, mapped = 0;
int numa_node;
/*
@@ -61,24 +62,44 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range range;
+
+ rc = dax_kmem_range(dev_dax, i, &range);
+ if (rc) {
+ dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
+ i, range.start, range.end);
+ continue;
+ }
+ total_len += range_len(&range);
+ }
+
+ if (!total_len) {
+ dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
+ return -EINVAL;
+ }
+
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
+ rc = memory_group_register_static(numa_node, total_len);
+ if (rc < 0)
+ goto err_reg_mgid;
+ data->mgid = rc;
+
for (i = 0; i < dev_dax->nr_range; i++) {
struct resource *res;
struct range range;
rc = dax_kmem_range(dev_dax, i, &range);
- if (rc) {
- dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
- i, range.start, range.end);
+ if (rc)
continue;
- }
/* Region is permanently reserved if hotremove fails. */
res = request_mem_region(range.start, range_len(&range), data->res_name);
@@ -108,8 +129,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
- rc = add_memory_driver_managed(numa_node, range.start,
- range_len(&range), kmem_name, MHP_NONE);
+ rc = add_memory_driver_managed(data->mgid, range.start,
+ range_len(&range), kmem_name, MHP_NID_IS_MGID);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
@@ -129,6 +150,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return 0;
err_request_mem:
+ memory_group_unregister(data->mgid);
+err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
@@ -156,8 +179,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
if (rc)
continue;
- rc = remove_memory(dev_dax->target_node, range.start,
- range_len(&range));
+ rc = remove_memory(range.start, range_len(&range));
if (rc == 0) {
release_resource(data->res[i]);
kfree(data->res[i]);
@@ -172,6 +194,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
}
if (success >= dev_dax->nr_range) {
+ memory_group_unregister(data->mgid);
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 44736cbd446e..fc89e91beea7 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -17,6 +17,24 @@
#include <linux/fs.h>
#include "dax-private.h"
+/**
+ * struct dax_device - anchor object for dax services
+ * @inode: core vfs
+ * @cdev: optional character interface for "device dax"
+ * @host: optional name for lookups where the device path is not available
+ * @private: dax driver private data
+ * @flags: state and boolean properties
+ */
+struct dax_device {
+ struct hlist_node list;
+ struct inode inode;
+ struct cdev cdev;
+ const char *host;
+ void *private;
+ unsigned long flags;
+ const struct dax_operations *ops;
+};
+
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
@@ -40,6 +58,42 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
+static int dax_host_hash(const char *host)
+{
+ return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
+}
+
+/**
+ * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
+ * @host: alternate name for the device registered by a dax driver
+ */
+static struct dax_device *dax_get_by_host(const char *host)
+{
+ struct dax_device *dax_dev, *found = NULL;
+ int hash, id;
+
+ if (!host)
+ return NULL;
+
+ hash = dax_host_hash(host);
+
+ id = dax_read_lock();
+ spin_lock(&dax_host_lock);
+ hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
+ if (!dax_alive(dax_dev)
+ || strcmp(host, dax_dev->host) != 0)
+ continue;
+
+ if (igrab(&dax_dev->inode))
+ found = dax_dev;
+ break;
+ }
+ spin_unlock(&dax_host_lock);
+ dax_read_unlock(id);
+
+ return found;
+}
+
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
@@ -65,15 +119,13 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-#endif
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
+bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
{
bool dax_enabled = false;
pgoff_t pgoff, pgoff_end;
- char buf[BDEVNAME_SIZE];
void *kaddr, *end_kaddr;
pfn_t pfn, end_pfn;
sector_t last_page;
@@ -81,29 +133,25 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id;
if (blocksize != PAGE_SIZE) {
- pr_info("%s: error: unsupported blocksize for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
return false;
}
if (!dax_dev) {
- pr_debug("%s: error: dax unsupported by block device\n",
- bdevname(bdev, buf));
+ pr_debug("%pg: error: dax unsupported by block device\n", bdev);
return false;
}
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
@@ -112,8 +160,8 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
if (len < 1 || len2 < 1) {
- pr_info("%s: error: dax access failed (%ld)\n",
- bdevname(bdev, buf), len < 1 ? len : len2);
+ pr_info("%pg: error: dax access failed (%ld)\n",
+ bdev, len < 1 ? len : len2);
dax_read_unlock(id);
return false;
}
@@ -147,57 +195,32 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
dax_read_unlock(id);
if (!dax_enabled) {
- pr_info("%s: error: dax support not enabled\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: dax support not enabled\n", bdev);
return false;
}
return true;
}
-EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ int blocksize, sector_t start, sector_t len)
{
- struct dax_device *dax_dev;
- struct request_queue *q;
- char buf[BDEVNAME_SIZE];
- bool ret;
+ bool ret = false;
int id;
- q = bdev_get_queue(bdev);
- if (!q || !blk_queue_dax(q)) {
- pr_debug("%s: error: request queue doesn't support dax\n",
- bdevname(bdev, buf));
- return false;
- }
-
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev) {
- pr_debug("%s: error: device does not support dax\n",
- bdevname(bdev, buf));
+ if (!dax_dev)
return false;
- }
id = dax_read_lock();
- ret = dax_supported(dax_dev, bdev, blocksize, 0,
- i_size_read(bdev->bd_inode) / 512);
+ if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
+ ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
+ start, len);
dax_read_unlock(id);
-
- put_dax(dax_dev);
-
return ret;
}
-EXPORT_SYMBOL_GPL(__bdev_dax_supported);
-#endif
+EXPORT_SYMBOL_GPL(dax_supported);
+#endif /* CONFIG_FS_DAX */
+#endif /* CONFIG_BLOCK */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -208,24 +231,6 @@ enum dax_device_flags {
DAXDEV_SYNC,
};
-/**
- * struct dax_device - anchor object for dax services
- * @inode: core vfs
- * @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
- * @private: dax driver private data
- * @flags: state and boolean properties
- */
-struct dax_device {
- struct hlist_node list;
- struct inode inode;
- struct cdev cdev;
- const char *host;
- void *private;
- unsigned long flags;
- const struct dax_operations *ops;
-};
-
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -323,19 +328,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
}
EXPORT_SYMBOL_GPL(dax_direct_access);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- if (!dax_dev)
- return false;
-
- if (!dax_alive(dax_dev))
- return false;
-
- return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
-}
-EXPORT_SYMBOL_GPL(dax_supported);
-
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
@@ -423,11 +415,6 @@ bool dax_alive(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_alive);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
/*
* Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
* that any fault handlers or operations that might have seen
@@ -625,38 +612,6 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
- */
-struct dax_device *dax_get_by_host(const char *host)
-{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
-EXPORT_SYMBOL_GPL(dax_get_by_host);
-
-/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 28f3e0ba6cdd..85faa7a5c7d1 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,6 +27,7 @@
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
+#include <linux/units.h>
#include "governor.h"
#define CREATE_TRACE_POINTS
@@ -34,7 +35,6 @@
#define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false)
#define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false)
-#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
static struct dentry *devfreq_debugfs;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9561e3d2d428..541efe01abc7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -42,6 +42,7 @@ config UDMABUF
config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
+ depends on DMA_SHARED_BUFFER
help
Don't pin buffers if the dynamic DMA-buf interface is available on
	  both the exporter and the importer. This fixes a security
@@ -52,6 +53,7 @@ config DMABUF_MOVE_NOTIFY
config DMABUF_DEBUG
bool "DMA-BUF debug checks"
+ depends on DMA_SHARED_BUFFER
default y if DMA_API_DEBUG
help
This option enables additional checks for DMA-BUF importers and
@@ -74,7 +76,7 @@ menuconfig DMABUF_HEAPS
menuconfig DMABUF_SYSFS_STATS
bool "DMA-BUF sysfs statistics"
- select DMA_SHARED_BUFFER
+ depends on DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
	  under /sys/kernel/dmabuf/buffers.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 39b5b46e880f..80c2c03cb014 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -277,10 +277,15 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD_BUS
+ tristate
+ default INTEL_IDXD
+
config INTEL_IDXD
tristate "Intel Data Accelerators support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
depends on PCI_MSI
+ depends on PCI_PASID
depends on SBITMAP
select DMA_ENGINE
help
@@ -291,6 +296,23 @@ config INTEL_IDXD
If unsure, say N.
+config INTEL_IDXD_COMPAT
+ bool "Legacy behavior for idxd driver"
+ depends on PCI && X86_64
+ select INTEL_IDXD_BUS
+ help
+ Compatible driver to support old /sys/bus/dsa/drivers/dsa behavior.
+ The old behavior performed driver bind/unbind for device and wq
+ devices all under the dsa driver. The compat driver will emulate
+	  the legacy behavior in order to allow existing support apps (e.g.
+	  accel-config) to continue to function. It is expected that
+	  accel-config v3.2 and earlier will need the compat mode. A distro
+	  with a later accel-config version can disable this compat config.
+
+ Say Y if you have old applications that require such behavior.
+
+ If unsure, say N.
+
# Config symbol that collects all the dependencies that's necessary to
# support shared virtual memory for the devices supported by idxd.
config INTEL_IDXD_SVM
@@ -315,7 +337,7 @@ config INTEL_IDXD_PERFMON
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
@@ -716,6 +738,8 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
+source "drivers/dma/ptdma/Kconfig"
+
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index aa69094e3547..616d926cf2a5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
@@ -41,7 +42,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IDXD) += idxd/
+obj-y += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 235f1396f968..5906eae26e2a 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -70,10 +70,22 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
- /* Match device by MMIO and IRQ */
+ /* Match device by MMIO */
if (si->mmio_base_low != lower_32_bits(mem) ||
- si->mmio_base_high != upper_32_bits(mem) ||
- si->gsi_interrupt != irq)
+ si->mmio_base_high != upper_32_bits(mem))
+ return 0;
+
+ /*
+ * acpi_gsi_to_irq() can't be used because some platforms do not save
+ * registered IRQs in the MP table. Instead we just try to register
+	 * the GSI, which is the core part of the above-mentioned function.
+ */
+ ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
+ if (ret < 0)
+ return 0;
+
+ /* Match device by Linux vIRQ */
+ if (ret != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0fe0676f8e1d..5a2c7573b692 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -691,10 +691,14 @@ static void msgdma_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&mdev->lock, flags);
- /* Read number of responses that are available */
- count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
- dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
- __func__, __LINE__, count);
+ if (mdev->resp) {
+ /* Read number of responses that are available */
+ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+ } else {
+ count = 1;
+ }
while (count--) {
/*
@@ -703,8 +707,12 @@ static void msgdma_tasklet(struct tasklet_struct *t)
* have any real values, like transferred bytes or error
* bits. So we need to just drop these values.
*/
- size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
+ if (mdev->resp) {
+ size = ioread32(mdev->resp +
+ MSGDMA_RESP_BYTES_TRANSFERRED);
+ status = ioread32(mdev->resp +
+ MSGDMA_RESP_STATUS);
+ }
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
@@ -757,14 +765,21 @@ static void msgdma_dev_remove(struct msgdma_device *mdev)
}
static int request_and_map(struct platform_device *pdev, const char *name,
- struct resource **res, void __iomem **ptr)
+ struct resource **res, void __iomem **ptr,
+ bool optional)
{
struct resource *region;
struct device *device = &pdev->dev;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
- dev_err(device, "resource %s not defined\n", name);
+ if (optional) {
+ *ptr = NULL;
+ dev_info(device, "optional resource %s not defined\n",
+ name);
+ return 0;
+ }
+ dev_err(device, "mandatory resource %s not defined\n", name);
return -ENODEV;
}
@@ -805,17 +820,17 @@ static int msgdma_probe(struct platform_device *pdev)
mdev->dev = &pdev->dev;
/* Map CSR space */
- ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
+ ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
if (ret)
return ret;
/* Map (extended) descriptor space */
- ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
+ ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
if (ret)
return ret;
/* Map response space */
- ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
+ ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
if (ret)
return ret;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64a52bf4d737..ab78e0f6afd7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2240,10 +2240,16 @@ static struct platform_driver at_xdmac_driver = {
static int __init at_xdmac_init(void)
{
- return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
+ return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);
+static void __exit at_xdmac_exit(void)
+{
+ platform_driver_unregister(&at_xdmac_driver);
+}
+module_exit(at_xdmac_exit);
+
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index d9e4ac3edb4e..35993ab92154 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -363,12 +363,16 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_DST_PER_POS);
break;
case DMA_DEV_TO_MEM:
reg |= (chan->config.device_fc ?
DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
break;
default:
break;
@@ -470,18 +474,13 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
-static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
- u32 handshake_num, bool set)
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
- unsigned long start = 0;
- unsigned long reg_value;
- unsigned long reg_mask;
- unsigned long reg_set;
- unsigned long mask;
- unsigned long val;
+ struct axi_dma_chip *chip = chan->chip;
+ unsigned long reg_value, val;
if (!chip->apb_regs) {
- dev_dbg(chip->dev, "apb_regs not initialized\n");
+ dev_err(chip->dev, "apb_regs not initialized\n");
return;
}
@@ -490,26 +489,22 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
	 * Lock the DMA channel by assigning a handshake number to the channel.
	 * Unlock the DMA channel by assigning 0x3F to the channel.
*/
- if (set) {
- reg_set = UNUSED_CHANNEL;
- val = handshake_num;
- } else {
- reg_set = handshake_num;
+ if (set)
+ val = chan->hw_handshake_num;
+ else
val = UNUSED_CHANNEL;
- }
reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- for_each_set_clump8(start, reg_mask, &reg_value, 64) {
- if (reg_mask == reg_set) {
- mask = GENMASK_ULL(start + 7, start);
- reg_value &= ~mask;
- reg_value |= rol64(val, start);
- lo_hi_writeq(reg_value,
- chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- break;
- }
- }
+	/* Channel is already allocated; set the handshake per channel ID */
+	/* A single 64-bit write covers all 8 channels */
+
+ reg_value &= ~(DMA_APB_HS_SEL_MASK <<
+ (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
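
The field update above in isolation: each channel owns an 8-bit lane of the
64-bit handshake-select register. A standalone sketch of the mask-and-or,
with the lane layout taken from the defines in this patch and a made-up
starting value:

#include <stdint.h>
#include <stdio.h>

#define HS_SEL_MASK	0xFFull		/* DMA_APB_HS_SEL_MASK */
#define HS_SEL_BITS	8		/* DMA_APB_HS_SEL_BIT_SIZE */

static uint64_t set_hs_sel(uint64_t reg, unsigned int chan, uint64_t val)
{
	reg &= ~(HS_SEL_MASK << (chan * HS_SEL_BITS));
	reg |= (val & HS_SEL_MASK) << (chan * HS_SEL_BITS);
	return reg;
}

int main(void)
{
	uint64_t reg = ~0ull;	/* example: all lanes parked */

	reg = set_hs_sel(reg, 2, 0x05);
	/* byte lane 2 is now 0x05: 0xffffffffff05ffff */
	printf("reg = 0x%llx\n", (unsigned long long)reg);
	return 0;
}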
/*
@@ -742,7 +737,7 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
llp = hw_desc->llp;
} while (total_segments);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -822,7 +817,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
llp = hw_desc->llp;
} while (num_sgs);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -1098,8 +1093,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
"%s failed to stop\n", axi_chan_name(chan));
if (chan->direction != DMA_MEM_TO_MEM)
- dw_axi_dma_set_hw_channel(chan->chip,
- chan->hw_handshake_num, false);
+ dw_axi_dma_set_hw_channel(chan, false);
if (chan->direction == DMA_MEM_TO_DEV)
dw_axi_dma_set_byte_halfword(chan, false);
@@ -1296,7 +1290,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return -EINVAL;
chip->dw->hdata->restrict_axi_burst_len = true;
- chip->dw->hdata->axi_rw_burst_len = tmp - 1;
+ chip->dw->hdata->axi_rw_burst_len = tmp;
}
return 0;
@@ -1365,7 +1359,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
-
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
struct axi_dma_chan *chan = &dw->chan[i];
@@ -1386,6 +1379,7 @@ static int dw_probe(struct platform_device *pdev)
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
+ dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index b69897887c76..380005afde16 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -184,6 +184,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
+#define DMA_APB_HS_SEL_BIT_SIZE 0x08 /* HW handshake bits per channel */
+#define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */
#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
@@ -256,6 +258,8 @@ enum {
/* CH_CFG_H */
#define CH_CFG_H_PRIORITY_POS 17
+#define CH_CFG_H_DST_PER_POS 12
+#define CH_CFG_H_SRC_PER_POS 7
#define CH_CFG_H_HS_SEL_DST_POS 4
#define CH_CFG_H_HS_SEL_SRC_POS 3
enum {
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index 3ce44de25d33..58f4078d83fe 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -1,15 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013,2018 Intel Corporation
+// Copyright (C) 2013,2018,2020-2021 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
-static void idma32_initialize_chan(struct dw_dma_chan *dwc)
+#define DMA_CTL_CH(x) (0x1000 + (x) * 4)
+#define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4)
+#define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4)
+#define DMA_XBAR_SEL(x) (0x1300 + (x) * 4)
+#define DMA_REGACCESS_CHID_CFG (0x1400)
+
+#define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0)
+#define CTL_CH_TRANSFER_MODE_S2S 0
+#define CTL_CH_TRANSFER_MODE_S2D 1
+#define CTL_CH_TRANSFER_MODE_D2S 2
+#define CTL_CH_TRANSFER_MODE_D2D 3
+#define CTL_CH_RD_RS_MASK GENMASK(4, 3)
+#define CTL_CH_WR_RS_MASK GENMASK(6, 5)
+#define CTL_CH_RD_NON_SNOOP_BIT BIT(8)
+#define CTL_CH_WR_NON_SNOOP_BIT BIT(9)
+
+#define XBAR_SEL_DEVID_MASK GENMASK(15, 0)
+#define XBAR_SEL_RX_TX_BIT BIT(16)
+#define XBAR_SEL_RX_TX_SHIFT 16
+
+#define REGACCESS_CHID_MASK GENMASK(2, 0)
+
+static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
+{
+ struct device *slave = dwc->chan.slave;
+
+ if (!slave || !dev_is_pci(slave))
+ return 0;
+
+ return to_pci_dev(slave)->devfn;
+}
+
+static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ void __iomem *misc = __dw_regs(dw);
+ u32 cfghi = 0, cfglo = 0;
+ u8 dst_id, src_id;
+ u32 value;
+
+ /* DMA Channel ID Configuration register must be programmed first */
+ value = readl(misc + DMA_REGACCESS_CHID_CFG);
+
+ value &= ~REGACCESS_CHID_MASK;
+ value |= dwc->chan.chan_id;
+
+ writel(value, misc + DMA_REGACCESS_CHID_CFG);
+
+ /* Configure channel attributes */
+ value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
+ value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
+ value &= ~CTL_CH_TRANSFER_MODE_MASK;
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= CTL_CH_TRANSFER_MODE_D2S;
+ value |= CTL_CH_WR_NON_SNOOP_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value |= CTL_CH_TRANSFER_MODE_S2D;
+ value |= CTL_CH_RD_NON_SNOOP_BIT;
+ break;
+ default:
+ /*
+ * Memory-to-Memory and Device-to-Device are ignored for now.
+ *
+ * For Memory-to-Memory transfers we would need to set mode
+ * and disable snooping on both sides.
+ */
+ return;
+ }
+
+ writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ /* Configure crossbar selection */
+ value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* DEVFN selection */
+ value &= ~XBAR_SEL_DEVID_MASK;
+ value |= idma32_get_slave_devfn(dwc);
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= XBAR_SEL_RX_TX_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value &= ~XBAR_SEL_RX_TX_BIT;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* Configure DMA channel low and high registers */
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ dst_id = dwc->chan.chan_id;
+ src_id = dwc->dws.src_id;
+ break;
+ case DMA_DEV_TO_MEM:
+ dst_id = dwc->dws.dst_id;
+ src_id = dwc->chan.chan_id;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
{
u32 cfghi = 0;
u32 cfglo = 0;
@@ -134,7 +263,10 @@ int idma32_dma_probe(struct dw_dma_chip *chip)
return -ENOMEM;
/* Channel operations */
- dw->initialize_chan = idma32_initialize_chan;
+ if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
+ dw->initialize_chan = idma32_initialize_chan_xbar;
+ else
+ dw->initialize_chan = idma32_initialize_chan_generic;
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
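The crossbar routes a channel's requests by the slave's PCI device/function number, which is why idma32_get_slave_devfn() returns the devfn directly. A short sketch of composing the XBAR_SEL value with the standard PCI_DEVFN() helper, which packs (slot << 3) | func:

#include <linux/pci.h>
#include <linux/types.h>

static u32 xbar_sel_sketch(unsigned int slot, unsigned int func, bool mem_to_dev)
{
	/* Low 16 bits select the device, bit 16 selects TX vs RX. */
	u32 value = PCI_DEVFN(slot, func) & XBAR_SEL_DEVID_MASK;

	if (mem_to_dev)
		value |= XBAR_SEL_RX_TX_BIT;	/* DMA_MEM_TO_DEV: TX side */
	return value;
}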
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 2e1c52eefdeb..563ce73488db 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -74,4 +74,20 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.remove = idma32_dma_remove,
};
+static const struct dw_dma_platform_data xbar_pdata = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+ .quirks = DW_DMA_QUIRK_XBAR_PRESENT,
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
+ .pdata = &xbar_pdata,
+ .probe = idma32_dma_probe,
+ .remove = idma32_dma_remove,
+};
+
#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
index c1cf7675b9d1..523ca806837c 100644
--- a/drivers/dma/dw/of.c
+++ b/drivers/dma/dw/of.c
@@ -50,15 +50,10 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
u32 nr_masters;
u32 nr_channels;
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
@@ -76,41 +71,29 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
+ of_property_read_u32(np, "chan_allocation_order", &pdata->chan_allocation_order);
+ of_property_read_u32(np, "chan_priority", &pdata->chan_priority);
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
+ of_property_read_u32(np, "block_size", &pdata->block_size);
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
- } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ /* Try deprecated property first */
+ if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
- if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = mb[tmp];
- } else {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = 1;
- }
+ /* If "data_width" and "data-width" both provided use the latter one */
+ of_property_read_u32_array(np, "data-width", pdata->data_width, nr_masters);
- if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst,
- nr_channels)) {
- memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
- }
+ memset32(pdata->multi_block, 1, nr_channels);
+ of_property_read_u32_array(np, "multi-block", pdata->multi_block, nr_channels);
- if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
- if (tmp > CHAN_PROTCTL_MASK)
- return NULL;
- pdata->protctl = tmp;
- }
+ memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
+ of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, nr_channels);
+
+ of_property_read_u32(np, "snps,dma-protection-control", &pdata->protctl);
+ if (pdata->protctl > CHAN_PROTCTL_MASK)
+ return NULL;
return pdata;
}
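The rewrite leans on of_property_read_u32() and friends leaving the output argument untouched on any failure, so a default can be installed first and overridden only if the property exists. The pattern in isolation, with a hypothetical property name:

#include <linux/of.h>
#include <linux/types.h>

static u32 read_with_default(struct device_node *np)
{
	u32 len = 64;	/* default when "snps,example-len" is absent */

	/* On any error the call leaves 'len' untouched; no branch needed. */
	of_property_read_u32(np, "snps,example-len", &len);
	return len;
}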
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 1142aa6f8c4a..26a3f926da02 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -120,9 +120,9 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&xbar_chip_pdata },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 0585d749d935..246118955877 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -149,9 +149,9 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB4", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB5", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB6", (kernel_ulong_t)&xbar_chip_pdata },
{ }
};
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 01027779beb8..98f9ee70362e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -897,7 +897,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
if (data && data->name)
name = data->name;
- ret = clk_enable(edmac->clk);
+ ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
@@ -936,7 +936,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
fail_free_irq:
free_irq(edmac->irq, edmac);
fail_clk_disable:
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
return ret;
}
@@ -969,7 +969,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, d, &list, node)
kfree(desc);
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
free_irq(edmac->irq, edmac);
}
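clk_prepare_enable() is the combined form of clk_prepare() plus clk_enable(); the prepare step may sleep, so the pair must run in process context. A sketch of the balanced pairing the ep93xx change adopts:

#include <linux/clk.h>

static int clock_session(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_disable_unprepare(clk);	/* mirrors both steps on teardown */
	return 0;
}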
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ae057922ef1..8dd40d00a672 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -291,9 +291,8 @@ static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
if (err) {
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list,
- &dpaa2_chan->comp_free);
+ list_move_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
}
}
err_enqueue:
@@ -626,8 +625,7 @@ static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
dpaa2_comp = to_fsl_qdma_comp(vdesc);
qchan = dpaa2_comp->qchan;
spin_lock_irqsave(&qchan->queue_lock, flags);
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
@@ -703,7 +701,7 @@ static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
/* DPDMAI enable */
err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
if (err) {
- dev_err(dev, "dpdmai_enable() faile\n");
+ dev_err(dev, "dpdmai_enable() failed\n");
goto err_enable;
}
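list_move_tail() is the single-call equivalent of the list_del()/list_add_tail() pair it replaces, without leaving the entry momentarily unlinked. A self-contained sketch with an assumed completion type:

#include <linux/list.h>

struct comp_sketch {
	struct list_head list;
	/* ... payload ... */
};

static void recycle_comp(struct comp_sketch *comp, struct list_head *free_list)
{
	/* Same effect as list_del(&comp->list) + list_add_tail(&comp->list, free_list) */
	list_move_tail(&comp->list, free_list);
}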
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index a259ee010e9b..c855a0e4f9ff 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -133,11 +133,6 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
writel_relaxed(tmp, addr);
}
-static void hisi_dma_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
@@ -544,6 +539,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
+ /* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
PCI_IRQ_MSI);
if (ret < 0) {
@@ -551,10 +547,6 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
- if (ret)
- return ret;
-
dma_dev = &hdma_dev->dma_dev;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
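Because the probe path uses pcim_enable_device(), the vectors from pci_alloc_irq_vectors() are device-managed and released by pcim_release(), making the explicit devm action redundant. The managed pattern, sketched:

#include <linux/pci.h>

static int probe_sketch(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);	/* opts into devres management */
	if (ret)
		return ret;

	/* Released automatically by pcim_release() on driver detach. */
	ret = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
	return ret < 0 ? ret : 0;
}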
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 6d11558756f8..a1e9f2b3a37c 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,12 @@
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+idxd_bus-y := bus.o
+
+obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+idxd_compat-y := compat.o
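-DDEFAULT_SYMBOL_NAMESPACE=IDXD places every EXPORT_SYMBOL_GPL() in these objects into the IDXD symbol namespace, so any consumer must import it or modpost complains. Both sides of the contract, sketched with a hypothetical symbol:

#include <linux/module.h>

int idxd_example_helper(void)	/* hypothetical symbol */
{
	return 0;
}
EXPORT_SYMBOL_GPL(idxd_example_helper);	/* lands in the IDXD namespace */

/* Consumer side: declare the dependency explicitly. */
MODULE_IMPORT_NS(IDXD);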
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
new file mode 100644
index 000000000000..6f84621053c6
--- /dev/null
+++ b/drivers/dma/idxd/bus.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include "idxd.h"
+
+
+int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner,
+ const char *mod_name)
+{
+ struct device_driver *drv = &idxd_drv->drv;
+
+ if (!idxd_drv->type) {
+ pr_debug("driver type not set (%ps)\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ drv->name = idxd_drv->name;
+ drv->bus = &dsa_bus_type;
+ drv->owner = owner;
+ drv->mod_name = mod_name;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(__idxd_driver_register);
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
+{
+ driver_unregister(&idxd_drv->drv);
+}
+EXPORT_SYMBOL_GPL(idxd_driver_unregister);
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(drv, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+ int i = 0;
+
+ while (idxd_drv->type[i] != IDXD_DEV_NONE) {
+ if (idxd_dev->type == idxd_drv->type[i])
+ return 1;
+ i++;
+ }
+
+ return 0;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_drv->probe(idxd_dev);
+}
+
+static void idxd_config_bus_remove(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ idxd_drv->remove(idxd_dev);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+};
+EXPORT_SYMBOL_GPL(dsa_bus_type);
+
+static int __init dsa_bus_init(void)
+{
+ return bus_register(&dsa_bus_type);
+}
+module_init(dsa_bus_init);
+
+static void __exit dsa_bus_exit(void)
+{
+ bus_unregister(&dsa_bus_type);
+}
+module_exit(dsa_bus_exit);
+
+MODULE_DESCRIPTION("IDXD driver dsa_bus_type driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e9def577c697..b9b2b4a4124e 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -41,7 +41,7 @@ struct idxd_user_context {
static void idxd_cdev_dev_release(struct device *dev)
{
- struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
+ struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;
@@ -218,14 +218,13 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
- unsigned long flags;
__poll_t out = 0;
poll_wait(filp, &wq->err_queue, wait);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return out;
}
@@ -256,9 +255,10 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
if (!idxd_cdev)
return -ENOMEM;
+ idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
- dev = &idxd_cdev->dev;
+ dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
@@ -268,7 +268,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
idxd_cdev->minor = minor;
device_initialize(dev);
- dev->parent = &wq->conf_dev;
+ dev->parent = wq_confdev(wq);
dev->bus = &dsa_bus_type;
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
@@ -299,10 +299,67 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
- cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
- put_device(&idxd_cdev->dev);
+ cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+ put_device(cdev_dev(idxd_cdev));
}
+static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_USER;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0)
+ goto err;
+
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
+ goto err_cdev;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_cdev:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_del_cdev(wq);
+ __drv_disable_wq(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_user_drv = {
+ .probe = idxd_user_drv_probe,
+ .remove = idxd_user_drv_remove,
+ .name = "user",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_user_drv);
+
int idxd_cdev_register(void)
{
int rc, i;
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
new file mode 100644
index 000000000000..3df21615f888
--- /dev/null
+++ b/drivers/dma/idxd/compat.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include "idxd.h"
+
+extern int device_driver_attach(struct device_driver *drv, struct device *dev);
+extern void device_driver_detach(struct device *dev);
+
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+
+static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int rc = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver) {
+ device_driver_detach(dev);
+ rc = count;
+ }
+
+ return rc;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
+
+static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ struct device_driver *alt_drv = NULL;
+ int rc = -ENODEV;
+ struct idxd_dev *idxd_dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev || dev->driver || drv != &dsa_drv.drv)
+ return -ENODEV;
+
+ idxd_dev = confdev_to_idxd_dev(dev);
+ if (is_idxd_dev(idxd_dev)) {
+ alt_drv = driver_find("idxd", bus);
+ } else if (is_idxd_wq_dev(idxd_dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ if (is_idxd_wq_kernel(wq))
+ alt_drv = driver_find("dmaengine", bus);
+ else if (is_idxd_wq_user(wq))
+ alt_drv = driver_find("user", bus);
+ }
+ if (!alt_drv)
+ return -ENODEV;
+
+ rc = device_driver_attach(alt_drv, dev);
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+
+static struct attribute *dsa_drv_compat_attrs[] = {
+ &driver_attr_bind.attr,
+ &driver_attr_unbind.attr,
+ NULL,
+};
+
+static const struct attribute_group dsa_drv_compat_attr_group = {
+ .attrs = dsa_drv_compat_attrs,
+};
+
+static const struct attribute_group *dsa_drv_compat_groups[] = {
+ &dsa_drv_compat_attr_group,
+ NULL,
+};
+
+static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev)
+{
+ return -ENODEV;
+}
+
+static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev)
+{
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver dsa_drv = {
+ .name = "dsa",
+ .probe = idxd_dsa_drv_probe,
+ .remove = idxd_dsa_drv_remove,
+ .type = dev_types,
+ .drv = {
+ .suppress_bind_attrs = true,
+ .groups = dsa_drv_compat_groups,
+ },
+};
+
+module_idxd_driver(dsa_drv);
+MODULE_IMPORT_NS(IDXD);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 420b93fe5feb..83a5ff2ecf2a 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -15,6 +15,8 @@
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
u32 *status);
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
@@ -139,8 +141,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (wq->type != IDXD_WQT_KERNEL)
return 0;
- wq->num_descs = wq->size;
- num_descs = wq->size;
+ num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
+ wq->num_descs = num_descs;
rc = alloc_hw_descs(wq, num_descs);
if (rc < 0)
@@ -234,7 +236,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
return 0;
}
-int idxd_wq_disable(struct idxd_wq *wq)
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -255,6 +257,8 @@ int idxd_wq_disable(struct idxd_wq *wq)
return -ENXIO;
}
+ if (reset_config)
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
@@ -289,6 +293,7 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
@@ -315,6 +320,7 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->portal);
wq->portal = NULL;
+ wq->portal_offset = 0;
}
void idxd_wqs_unmap_portal(struct idxd_device *idxd)
@@ -335,19 +341,18 @@ int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -362,19 +367,18 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 0;
wqcfg.pasid = 0;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -383,11 +387,11 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
return 0;
}
-void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- lockdep_assert_held(&idxd->dev_lock);
+ lockdep_assert_held(&wq->wq_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->size = 0;
@@ -396,6 +400,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->priority = 0;
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
}
@@ -455,7 +460,6 @@ int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
- unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -465,13 +469,13 @@ int idxd_device_init_reset(struct idxd_device *idxd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
IDXD_CMDSTS_ACTIVE)
cpu_relax();
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
return 0;
}
@@ -480,7 +484,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
{
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
- unsigned long flags;
+ u32 stat;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -494,7 +498,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -511,18 +515,18 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
wait_for_completion(&done);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
- if (status) {
- *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- idxd->cmd_status = *status & GENMASK(7, 0);
- }
+ stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ spin_lock(&idxd->cmd_lock);
+ if (status)
+ *status = stat;
+ idxd->cmd_status = stat & GENMASK(7, 0);
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
}
int idxd_device_enable(struct idxd_device *idxd)
@@ -548,27 +552,10 @@ int idxd_device_enable(struct idxd_device *idxd)
return 0;
}
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
- int i;
-
- lockdep_assert_held(&idxd->dev_lock);
-
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_ENABLED) {
- idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
- }
- }
-}
-
int idxd_device_disable(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
u32 status;
- unsigned long flags;
if (!idxd_is_enabled(idxd)) {
dev_dbg(dev, "Device is not enabled\n");
@@ -584,22 +571,20 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
- unsigned long flags;
-
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
}
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
@@ -649,7 +634,6 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
struct device *dev = &idxd->pdev->dev;
u32 operand, status;
union idxd_command_reg cmd;
- unsigned long flags;
if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
return -EOPNOTSUPP;
@@ -667,13 +651,13 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
cpu_relax();
status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
dev_dbg(dev, "release int handle failed: %#x\n", status);
@@ -685,6 +669,59 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
}
/* Device configuration bits */
+static void idxd_engines_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ engine->group = NULL;
+ }
+}
+
+static void idxd_groups_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ memset(&group->grpcfg, 0, sizeof(group->grpcfg));
+ group->num_engines = 0;
+ group->num_wqs = 0;
+ group->use_token_limit = false;
+ group->tokens_allowed = 0;
+ group->tokens_reserved = 0;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+}
+
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
+ }
+}
+
+void idxd_device_clear_state(struct idxd_device *idxd)
+{
+ idxd_groups_clear_state(idxd);
+ idxd_engines_clear_state(idxd);
+ idxd_device_wqs_clear_state(idxd);
+}
+
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
union msix_perm mperm;
@@ -773,6 +810,15 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
return 0;
}
+static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
+ return true;
+ return false;
+}
+
static int idxd_wq_config_write(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -796,6 +842,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_size = wq->size;
if (wq->size == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
dev_warn(dev, "Incorrect work queue size: 0\n");
return -EINVAL;
}
@@ -804,7 +851,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_thresh = wq->threshold;
/* byte 8-11 */
- wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
@@ -814,6 +860,25 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->pasid = idxd->pasid;
}
+ /*
+ * Here the priv bit is set depending on the WQ type. priv = 1 if the
+ * WQ type is kernel to indicate privileged access. This setting only
+ * matters for dedicated WQ. According to the DSA spec:
+ * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
+ * Privileged Mode Enable field of the PCI Express PASID capability
+ * is 0, this field must be 0.
+ *
+ * If a dedicated kernel WQ cannot satisfy this requirement of the
+ * PASID capability, the configuration is rejected.
+ */
+ wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
+ !idxd_device_pasid_priv_enabled(idxd) &&
+ wq->type == IDXD_WQT_KERNEL) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
+ return -EOPNOTSUPP;
+ }
+
wq->wqcfg->priority = wq->priority;
if (idxd->hw.gen_cap.block_on_fault &&
@@ -931,6 +996,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
@@ -939,8 +1005,10 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
configured++;
}
- if (configured == 0)
+ if (configured == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
return -EINVAL;
+ }
return 0;
}
@@ -1086,3 +1154,203 @@ int idxd_device_load_config(struct idxd_device *idxd)
return 0;
}
+
+int __drv_enable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int rc = -ENXIO;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
+ goto err;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "wq %d already enabled.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (!wq->group) {
+ dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
+ goto err;
+ }
+
+ if (strlen(wq->name) == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
+ dev_dbg(dev, "wq %d name not set.\n", wq->id);
+ goto err;
+ }
+
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
+ dev_dbg(dev, "PASID not enabled and shared wq.\n");
+ goto err;
+ }
+ /*
+ * A shared wq with the threshold set to 0 means the user
+ * either never set the threshold or transitioned from a
+ * dedicated wq without setting one. A threshold of 0 would
+ * effectively disable the shared wq, so the driver does not
+ * allow it to be set via sysfs.
+ */
+ if (wq->threshold == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
+ dev_dbg(dev, "Shared wq and threshold 0.\n");
+ goto err;
+ }
+ }
+
+ rc = 0;
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0) {
+ dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
+ dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
+ goto err_map_portal;
+ }
+
+ wq->client_count = 0;
+ return 0;
+
+err_map_portal:
+ rc = idxd_wq_disable(wq, false);
+ if (rc < 0)
+ dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
+err:
+ return rc;
+}
+
+int drv_enable_wq(struct idxd_wq *wq)
+{
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ rc = __drv_enable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+void __drv_disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd_wq_refcount(wq))
+ dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ idxd_wq_drain(wq);
+ idxd_wq_reset(wq);
+
+ wq->client_count = 0;
+}
+
+void drv_disable_wq(struct idxd_wq *wq)
+{
+ mutex_lock(&wq->wq_lock);
+ __drv_disable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int rc = 0;
+
+ /*
+ * The device must be in the disabled state for idxd_drv to load. If it is
+ * enabled, the device was altered outside of the driver's control. If it
+ * is halted, we do not want to proceed.
+ */
+ if (idxd->state != IDXD_DEV_DISABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
+ return -ENXIO;
+ }
+
+ /* Device configuration */
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0)
+ return -ENXIO;
+
+ /* Start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ return rc;
+
+ /* Setup DMA device without channels */
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ idxd_device_disable(idxd);
+ idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
+ return rc;
+ }
+
+ idxd->cmd_status = 0;
+ return 0;
+}
+
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ struct device *wq_dev = wq_confdev(wq);
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
+ device_release_driver(wq_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ idxd_device_disable(idxd);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ idxd_device_reset(idxd);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_DSA,
+ IDXD_DEV_IAX,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_drv = {
+ .type = dev_types,
+ .probe = idxd_device_drv_probe,
+ .remove = idxd_device_drv_remove,
+ .name = "idxd",
+};
+EXPORT_SYMBOL_GPL(idxd_drv);
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 77439b645044..e0f056c1d1f5 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -69,7 +69,11 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->src_addr = addr_f1;
hw->dst_addr = addr_f2;
hw->xfer_size = len;
- hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ /*
+ * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
+ * field instead. This field should be set to 1 for kernel descriptors.
+ */
+ hw->priv = 1;
hw->completion_addr = compl;
}
@@ -149,10 +153,8 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
- if (rc < 0) {
- idxd_free_desc(wq, desc);
+ if (rc < 0)
return rc;
- }
return cookie;
}
@@ -245,7 +247,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
- get_device(&wq->conf_dev);
+ get_device(wq_confdev(wq));
return 0;
}
@@ -260,5 +262,87 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq)
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
- put_device(&wq->conf_dev);
+ put_device(wq_confdev(wq));
}
+
+static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_KERNEL;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
+ rc = -ENXIO;
+ goto err;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+ dev_dbg(dev, "WQ resource alloc failed\n");
+ goto err_res_alloc;
+ }
+
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ goto err_ref;
+ }
+
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
+ dev_dbg(dev, "Failed to register dma channel\n");
+ goto err_dma;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_dma:
+ idxd_wq_quiesce(wq);
+err_ref:
+ idxd_wq_free_resources(wq);
+err_res_alloc:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_quiesce(wq);
+ idxd_unregister_dma_channel(wq);
+ __drv_disable_wq(wq);
+ idxd_wq_free_resources(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_dmaengine_drv = {
+ .probe = idxd_dmaengine_drv_probe,
+ .remove = idxd_dmaengine_drv_remove,
+ .name = "dmaengine",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index fc708be7ad9a..bfcb03329f77 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -11,14 +11,32 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
+#include <uapi/linux/idxd.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
+extern bool tc_override;
-struct idxd_device;
struct idxd_wq;
+struct idxd_dev;
+
+enum idxd_dev_type {
+ IDXD_DEV_NONE = -1,
+ IDXD_DEV_DSA = 0,
+ IDXD_DEV_IAX,
+ IDXD_DEV_WQ,
+ IDXD_DEV_GROUP,
+ IDXD_DEV_ENGINE,
+ IDXD_DEV_CDEV,
+ IDXD_DEV_MAX_TYPE,
+};
+
+struct idxd_dev {
+ struct device conf_dev;
+ enum idxd_dev_type type;
+};
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -34,9 +52,18 @@ enum idxd_type {
#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
+ const char *name;
+ enum idxd_dev_type *type;
+ int (*probe)(struct idxd_dev *idxd_dev);
+ void (*remove)(struct idxd_dev *idxd_dev);
struct device_driver drv;
};
+extern struct idxd_device_driver dsa_drv;
+extern struct idxd_device_driver idxd_drv;
+extern struct idxd_device_driver idxd_dmaengine_drv;
+extern struct idxd_device_driver idxd_user_drv;
+
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
@@ -51,7 +78,7 @@ struct idxd_irq_entry {
};
struct idxd_group {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_device *idxd;
struct grpcfg grpcfg;
int id;
@@ -110,7 +137,7 @@ enum idxd_wq_type {
struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
- struct device dev;
+ struct idxd_dev idxd_dev;
int minor;
};
@@ -136,9 +163,10 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
+ u32 portal_offset;
struct percpu_ref wq_active;
struct completion wq_dead;
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
@@ -153,7 +181,6 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
- u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
@@ -174,7 +201,7 @@ struct idxd_wq {
};
struct idxd_engine {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
int id;
struct idxd_group *group;
struct idxd_device *idxd;
@@ -194,7 +221,6 @@ struct idxd_hw {
enum idxd_device_state {
IDXD_DEV_HALTED = -1,
IDXD_DEV_DISABLED = 0,
- IDXD_DEV_CONF_READY,
IDXD_DEV_ENABLED,
};
@@ -218,7 +244,7 @@ struct idxd_driver_data {
};
struct idxd_device {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
@@ -226,7 +252,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
- u8 cmd_status;
+ u32 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -290,7 +316,6 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
- unsigned int vector;
struct idxd_wq *wq;
};
@@ -302,11 +327,62 @@ enum idxd_completion_status {
IDXD_COMP_DESC_ABORT = 0xff,
};
-#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
-#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
+#define wq_confdev(wq) &wq->idxd_dev.conf_dev
+#define engine_confdev(engine) &engine->idxd_dev.conf_dev
+#define group_confdev(group) &group->idxd_dev.conf_dev
+#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
+
+#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
+#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
+#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
+
+static inline struct idxd_device *confdev_to_idxd(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_idxd(idxd_dev);
+}
+
+static inline struct idxd_wq *confdev_to_wq(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_wq(idxd_dev);
+}
+
+static inline struct idxd_engine *confdev_to_engine(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_engine, idxd_dev);
+}
+
+static inline struct idxd_group *confdev_to_group(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_group, idxd_dev);
+}
+
+static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
+}
+
+static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
+{
+ if (type >= IDXD_DEV_MAX_TYPE) {
+ idev->type = IDXD_DEV_NONE;
+ return;
+ }
+
+ idev->type = type;
+}
extern struct bus_type dsa_bus_type;
-extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
@@ -316,24 +392,24 @@ extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
-static inline bool is_dsa_dev(struct device *dev)
+static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &dsa_device_type;
+ return idxd_dev->type == IDXD_DEV_DSA;
}
-static inline bool is_iax_dev(struct device *dev)
+static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &iax_device_type;
+ return idxd_dev->type == IDXD_DEV_IAX;
}
-static inline bool is_idxd_dev(struct device *dev)
+static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
- return is_dsa_dev(dev) || is_iax_dev(dev);
+ return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}
-static inline bool is_idxd_wq_dev(struct device *dev)
+static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &idxd_wq_device_type;
+ return idxd_dev->type == IDXD_DEV_WQ;
}
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
@@ -343,11 +419,16 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
return false;
}
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
return wq->type == IDXD_WQT_USER;
}
+static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_KERNEL;
+}
+
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
@@ -389,6 +470,24 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
+#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
+
+/*
+ * Even though this function can be accessed by multiple threads, it is safe to use.
+ * At worst the address gets used more than once before it gets incremented. We don't
+ * hit a threshold until iops reach many millions per second, so the occasional
+ * reuse of the same address is tolerable compared to using an atomic variable. This
+ * is safe on a system that has atomic load/store for 32-bit integers. Given that
+ * this is an Intel iEP device, that should not be a problem.
+ */
+static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
+{
+ int ofs = wq->portal_offset;
+
+ wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
+ return wq->portal + ofs;
+}
+
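Since struct dsa_raw_desc is eight u64 fields (64 bytes), successive calls stride the 4 KiB portal in 64-byte steps and wrap at the page boundary via IDXD_PORTAL_MASK. The arithmetic in isolation:

#define PORTAL_PAGE_SIZE	4096
#define PORTAL_MASK		(PORTAL_PAGE_SIZE - 1)	/* IDXD_PORTAL_MASK */
#define DESC_STRIDE		64	/* sizeof(struct dsa_raw_desc), assumed */

/* Offsets cycle 0, 64, 128, ..., 4032, then wrap back to 0. */
static unsigned int next_portal_offset(unsigned int ofs)
{
	return (ofs + DESC_STRIDE) & PORTAL_MASK;
}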
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
@@ -404,6 +503,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
+ struct module *module, const char *mod_name);
+#define idxd_driver_register(driver) \
+ __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
+
+#define module_idxd_driver(__idxd_driver) \
+ module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
+
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
@@ -424,13 +533,20 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
+int idxd_register_idxd_drv(void);
+void idxd_unregister_idxd_drv(void);
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
+int drv_enable_wq(struct idxd_wq *wq);
+int __drv_enable_wq(struct idxd_wq *wq);
+void drv_disable_wq(struct idxd_wq *wq);
+void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
-void idxd_device_cleanup(struct idxd_device *idxd);
+void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
-void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
@@ -443,12 +559,11 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
-int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
-void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index c0f4c0422f32..eb09bc591c31 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -26,11 +26,16 @@
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+MODULE_IMPORT_NS(IDXD);
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+bool tc_override;
+module_param(tc_override, bool, 0644);
+MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
+
#define DRV_NAME "idxd"
bool support_enqcmd;
@@ -200,6 +205,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct idxd_wq *wq;
+ struct device *conf_dev;
int i, rc;
idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
@@ -214,15 +220,17 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(&wq->conf_dev);
- wq->conf_dev.parent = &idxd->conf_dev;
- wq->conf_dev.bus = &dsa_bus_type;
- wq->conf_dev.type = &idxd_wq_device_type;
- rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ device_initialize(wq_confdev(wq));
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -233,7 +241,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -243,8 +251,11 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->wqs[i]->conf_dev);
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ }
return rc;
}
@@ -252,6 +263,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
int i, rc;
idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
@@ -266,15 +278,17 @@ static int idxd_setup_engines(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
+ conf_dev = engine_confdev(engine);
engine->id = i;
engine->idxd = idxd;
- device_initialize(&engine->conf_dev);
- engine->conf_dev.parent = &idxd->conf_dev;
- engine->conf_dev.bus = &dsa_bus_type;
- engine->conf_dev.type = &idxd_engine_device_type;
- rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_engine_device_type;
+ rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
- put_device(&engine->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -284,14 +298,18 @@ static int idxd_setup_engines(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->engines[i]->conf_dev);
+ while (--i >= 0) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ }
return rc;
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
struct idxd_group *group;
int i, rc;
@@ -307,28 +325,37 @@ static int idxd_setup_groups(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
+ conf_dev = group_confdev(group);
group->id = i;
group->idxd = idxd;
- device_initialize(&group->conf_dev);
- group->conf_dev.parent = &idxd->conf_dev;
- group->conf_dev.bus = &dsa_bus_type;
- group->conf_dev.type = &idxd_group_device_type;
- rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_group_device_type;
+ rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
- put_device(&group->conf_dev);
+ put_device(conf_dev);
goto err;
}
idxd->groups[i] = group;
- group->tc_a = -1;
- group->tc_b = -1;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ group->tc_a = 1;
+ group->tc_b = 1;
+ } else {
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
}
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->groups[i]->conf_dev);
+ while (--i >= 0) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
return rc;
}
@@ -337,11 +364,11 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
int i;
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
destroy_workqueue(idxd->wq);
}
@@ -381,13 +408,13 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
err_group:
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
kfree(idxd->int_handles);
return rc;
@@ -469,6 +496,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
+ struct device *conf_dev;
struct idxd_device *idxd;
int rc;
@@ -476,19 +504,21 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (!idxd)
return NULL;
+ conf_dev = idxd_confdev(idxd);
idxd->pdev = pdev;
idxd->data = data;
+ idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
- device_initialize(&idxd->conf_dev);
- idxd->conf_dev.parent = dev;
- idxd->conf_dev.bus = &dsa_bus_type;
- idxd->conf_dev.type = idxd->data->dev_type;
- rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = dev;
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = idxd->data->dev_type;
+ rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
- put_device(&idxd->conf_dev);
+ put_device(conf_dev);
return NULL;
}
@@ -639,15 +669,9 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev_dbg(dev, "Set DMA masks\n");
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err;
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err;
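
The replacement collapses four legacy pci_set_*_dma_mask() calls into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one step; the 32-bit retry is kept as a fallback for platforms that cannot address 64 bits. A sketch of what the helper expands to, matching its definition in <linux/dma-mapping.h>:

static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	/* Coherent mask can only be set if the streaming mask fit. */
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
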
@@ -668,8 +692,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dev_register;
}
- idxd->state = IDXD_DEV_CONF_READY;
-
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
@@ -680,7 +702,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(&idxd->conf_dev);
+ put_device(idxd_confdev(idxd));
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -793,7 +815,7 @@ static void idxd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
- device_unregister(&idxd->conf_dev);
+ device_unregister(idxd_confdev(idxd));
}
static struct pci_driver idxd_pci_driver = {
@@ -824,13 +846,17 @@ static int __init idxd_init_module(void)
perfmon_init();
- err = idxd_register_bus_type();
+ err = idxd_driver_register(&idxd_drv);
if (err < 0)
- return err;
+ goto err_idxd_driver_register;
- err = idxd_register_driver();
+ err = idxd_driver_register(&idxd_dmaengine_drv);
if (err < 0)
- goto err_idxd_driver_register;
+ goto err_idxd_dmaengine_driver_register;
+
+ err = idxd_driver_register(&idxd_user_drv);
+ if (err < 0)
+ goto err_idxd_user_driver_register;
err = idxd_cdev_register();
if (err)
@@ -845,19 +871,23 @@ static int __init idxd_init_module(void)
err_pci_register:
idxd_cdev_remove();
err_cdev_register:
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+err_idxd_user_driver_register:
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+err_idxd_dmaengine_driver_register:
+ idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
- idxd_unregister_bus_type();
return err;
}
module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+ idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
- idxd_unregister_bus_type();
perfmon_exit();
}
module_exit(idxd_exit_module);
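
The init/exit rework above swaps the single bus-type registration for three idxd_driver_register() calls with a matching goto-unwind ladder, and the exit path unregisters in strict reverse order. The shape of that pattern, with hypothetical names:

/* Illustrative only: register A, B, C; on failure unwind whatever
 * already succeeded, in reverse order. */
static int __init example_init(void)
{
	int err;

	err = register_a();
	if (err)
		return err;
	err = register_b();
	if (err)
		goto err_b;
	err = register_c();
	if (err)
		goto err_c;
	return 0;
err_c:
	unregister_b();
err_b:
	unregister_a();
	return err;
}
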
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 4e3a7198c0ca..ca88fa7a328e 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -22,13 +22,6 @@ struct idxd_fault {
struct idxd_device *idxd;
};
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -51,7 +44,7 @@ static void idxd_device_reinit(struct work_struct *work)
rc = idxd_wq_enable(wq);
if (rc < 0) {
dev_warn(dev, "Unable to re-enable wq %s\n",
- dev_name(&wq->conf_dev));
+ dev_name(wq_confdev(wq)));
}
}
}
@@ -59,47 +52,7 @@ static void idxd_device_reinit(struct work_struct *work)
return;
out:
- idxd_device_wqs_clear_state(idxd);
-}
-
-static void idxd_device_fault_work(struct work_struct *work)
-{
- struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
- struct idxd_irq_entry *ie;
- int i;
- int processed;
- int irqcnt = fault->idxd->num_wq_irqs + 1;
-
- for (i = 1; i < irqcnt; i++) {
- ie = &fault->idxd->irq_entries[i];
- irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
-
- irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
- }
-
- kfree(fault);
-}
-
-static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
- u64 fault_addr)
-{
- struct idxd_fault *fault;
-
- fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
- if (!fault)
- return -ENOMEM;
-
- fault->addr = fault_addr;
- fault->idxd = idxd;
- INIT_WORK(&fault->work, idxd_device_fault_work);
- queue_work(idxd->wq, &fault->work);
- return 0;
+ idxd_device_clear_state(idxd);
}
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
@@ -111,7 +64,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
bool err = false;
if (cause & IDXD_INTC_ERR) {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
@@ -136,7 +89,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
}
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
val |= IDXD_INTC_ERR;
for (i = 0; i < 4; i++)
@@ -168,15 +121,6 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (!err)
return 0;
- /*
- * This case should rarely happen and typically is due to software
- * programming error by the driver.
- */
- if (idxd->sw_err.valid &&
- idxd->sw_err.desc_valid &&
- idxd->sw_err.fault_addr)
- idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
-
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
@@ -189,15 +133,15 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
- idxd_device_wqs_clear_state(idxd);
+ idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}

@@ -228,127 +172,79 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
-static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
-{
- /*
- * Completion address can be bad as well. Check fault address match for descriptor
- * and completion address.
- */
- if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
- struct idxd_device *idxd = desc->wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
- return true;
- }
-
- return false;
-}
-
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
- int queued = 0;
- unsigned long flags;
- enum idxd_complete_type reason;
- *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- goto out;
-
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
+ return;
llist_for_each_entry_safe(desc, t, head, llnode) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
if (status) {
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
- (*processed)++;
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
- (*processed)++;
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
} else {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
&irq_entry->work_list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- queued++;
+ spin_unlock(&irq_entry->list_lock);
}
}
-
- out:
- return queued;
}
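
irq_process_pending_llist() now runs only from the IRQ thread: submitters push descriptors with llist_add(), and the thread detaches the entire lock-free list in one atomic llist_del_all() before walking it. A condensed sketch of that consumer side (complete_desc() and the llnode member come from this driver):

static void consume_all(struct llist_head *pending)
{
	struct llist_node *head;
	struct idxd_desc *desc, *t;

	/* Atomically take everything queued so far; no lock needed. */
	head = llist_del_all(pending);
	if (!head)
		return;
	/* _safe variant: complete_desc() may free the entry. */
	llist_for_each_entry_safe(desc, t, head, llnode)
		complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
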
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
- int queued = 0;
- unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
- enum idxd_complete_type reason;
-
- *processed = 0;
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
/*
* This lock protects list corruption from access of list outside of the irq handler
* thread.
*/
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
if (list_empty(&irq_entry->work_list)) {
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- return 0;
+ spin_unlock(&irq_entry->list_lock);
+ return;
}
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
if (desc->completion->status) {
list_del(&desc->list);
- (*processed)++;
list_add_tail(&desc->list, &flist);
- } else {
- queued++;
}
}
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
list_for_each_entry(desc, &flist, list) {
- u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
-
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
-
- return queued;
}
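
The repeated comment about checking the original status matters because of the constants involved: the abort marker is software-defined as 0xff, and masking with DSA_COMP_STATUS_MASK (0x7f in this driver's headers) would fold it into the valid-status range. Illustratively:

#define DSA_COMP_STATUS_MASK	0x7f	/* per include/uapi/linux/idxd.h */
#define IDXD_COMP_DESC_ABORT	0xff	/* software-defined by this driver */

/* 0xff & 0x7f == 0x7f, so a masked compare can never observe ABORT;
 * the unmasked completion->status must be tested first. */
static bool desc_aborted(u8 raw_status)
{
	return raw_status == IDXD_COMP_DESC_ABORT;
}
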
-static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
+irqreturn_t idxd_wq_thread(int irq, void *data)
{
- int rc, processed, total = 0;
+ struct idxd_irq_entry *irq_entry = data;
/*
* There are two lists we are processing. The pending_llist is where
@@ -367,31 +263,9 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* and process the completed entries.
* 4. If the entry is still waiting on hardware, list_add_tail() to
* the work_list.
- * 5. Repeat until no more descriptors.
*/
- do {
- rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- if (rc != 0)
- continue;
-
- rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- } while (rc != 0);
-
- return total;
-}
-
-irqreturn_t idxd_wq_thread(int irq, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- int processed;
-
- processed = idxd_desc_process(irq_entry);
- if (processed == 0)
- return IRQ_NONE;
+ irq_process_work_list(irq_entry);
+ irq_process_pending_llist(irq_entry);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c970c3f025f0..ffc7550a77ee 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -7,6 +7,9 @@
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+#define DEVICE_VERSION_1 0x100
+#define DEVICE_VERSION_2 0x200
+
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
#define IDXD_PORTAL_SIZE PAGE_SIZE
@@ -349,6 +352,9 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
+#define WQCFG_OCCUP_IDX 6
+
+#define WQCFG_OCCUP_MASK 0xffff
/*
* This macro calculates the offset into the WQCFG register
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 36c9c1a89b7e..de76fb4abac2 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -22,21 +22,13 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc->hw->pasid = idxd->pasid;
/*
- * Descriptor completion vectors are 1...N for MSIX. We will round
- * robin through the N vectors.
+ * On host, MSIX vector 0 is used for the misc interrupt. Therefore, when we
+ * match vectors 1:1 to the WQ id, we need to add 1.
*/
- wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- if (!idxd->int_handles) {
- desc->hw->int_handle = wq->vec_ptr;
- } else {
- /*
- * int_handles are only for descriptor completion. However for device
- * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
- * though we are rotating through 1...N for descriptor interrupts, we
- * need to acqurie the int_handles from 0..N-1.
- */
- desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
- }
+ if (!idxd->int_handles)
+ desc->hw->int_handle = wq->id + 1;
+ else
+ desc->hw->int_handle = idxd->int_handles[wq->id];
return desc;
}
@@ -67,7 +59,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
if (signal_pending_state(TASK_INTERRUPTIBLE, current))
break;
idx = sbitmap_queue_get(sbq, &cpu);
- if (idx > 0)
+ if (idx >= 0)
break;
schedule();
}
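
The `idx >= 0` change fixes an off-by-one: sbitmap_queue_get() returns the allocated bit number on success, where 0 is a valid slot, and a negative value only when the map is exhausted, so the old `idx > 0` test treated descriptor 0 as a failure and kept spinning. The corrected allocation in miniature:

/* sbitmap_queue_get(): bit index >= 0 on success, negative when empty. */
idx = sbitmap_queue_get(sbq, &cpu);
if (idx < 0)
	return ERR_PTR(-EAGAIN);	/* no free descriptor slots */
desc = __get_desc(wq, idx, cpu);
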
@@ -114,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
- unsigned long flags;
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
* Grab the list lock so it will block the irq thread handler. This allows the
* abort code to locate the descriptor that needs to be aborted.
*/
- spin_lock_irqsave(&ie->list_lock, flags);
+ spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
llist_for_each_entry_safe(d, t, head, llnode) {
@@ -135,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (!found)
found = list_abort_desc(wq, ie, desc);
- spin_unlock_irqrestore(&ie->list_lock, flags);
+ spin_unlock(&ie->list_lock);
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
@@ -148,13 +139,17 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
void __iomem *portal;
int rc;
- if (idxd->state != IDXD_DEV_ENABLED)
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd_free_desc(wq, desc);
return -EIO;
+ }
- if (!percpu_ref_tryget_live(&wq->wq_active))
+ if (!percpu_ref_tryget_live(&wq->wq_active)) {
+ idxd_free_desc(wq, desc);
return -ENXIO;
+ }
- portal = wq->portal;
+ portal = idxd_wq_portal_addr(wq);
/*
* The wmb() flushes writes to coherent DMA data before
@@ -168,7 +163,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* that we designated the descriptor to.
*/
if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- ie = &idxd->irq_entries[desc->vector];
+ ie = &idxd->irq_entries[wq->id + 1];
llist_add(&desc->llnode, &ie->pending_llist);
}
@@ -183,8 +178,12 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
*/
rc = enqcmds(portal, desc->hw);
if (rc < 0) {
+ percpu_ref_put(&wq->wq_active);
+ /* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
+ else
+ idxd_free_desc(wq, desc);
return rc;
}
}
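
The submit-path hunks above tighten descriptor ownership: every failure return of idxd_submit_desc() now releases the descriptor itself (directly via idxd_free_desc(), or through the abort path), and the percpu reference taken on wq_active is dropped on the enqcmds() failure leg. The resulting caller-side contract, sketched:

/* Illustrative caller: on any error return from idxd_submit_desc()
 * the descriptor has already been freed or handed to the abort path,
 * so the caller must not touch it again. */
desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
if (IS_ERR(desc))
	return PTR_ERR(desc);
rc = idxd_submit_desc(wq, desc);
if (rc < 0)
	return rc;	/* desc already released by the submit path */
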
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 26d8ff97d13d..a9025be940db 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -16,336 +16,11 @@ static char *idxd_wq_type_names[] = {
[IDXD_WQT_USER] = "user",
};
-static int idxd_config_bus_match(struct device *dev,
- struct device_driver *drv)
-{
- int matched = 0;
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY)
- return 0;
- matched = 1;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
- struct idxd_device *idxd = wq->idxd;
-
- if (idxd->state < IDXD_DEV_CONF_READY)
- return 0;
-
- if (wq->state != IDXD_WQ_DISABLED) {
- dev_dbg(dev, "%s not disabled\n", dev_name(dev));
- return 0;
- }
- matched = 1;
- }
-
- if (matched)
- dev_dbg(dev, "%s matched\n", dev_name(dev));
-
- return matched;
-}
-
-static int enable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- unsigned long flags;
- int rc;
-
- mutex_lock(&wq->wq_lock);
-
- if (idxd->state != IDXD_DEV_ENABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Enabling while device not enabled.\n");
- return -EPERM;
- }
-
- if (wq->state != IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d already enabled.\n", wq->id);
- return -EBUSY;
- }
-
- if (!wq->group) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ not attached to group.\n");
- return -EINVAL;
- }
-
- if (strlen(wq->name) == 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ name not set.\n");
- return -EINVAL;
- }
-
- /* Shared WQ checks */
- if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
- dev_warn(dev, "PASID not enabled and shared WQ.\n");
- mutex_unlock(&wq->wq_lock);
- return -ENXIO;
- }
- /*
- * Shared wq with the threshold set to 0 means the user
- * did not set the threshold or transitioned from a
- * dedicated wq but did not set threshold. A value
- * of 0 would effectively disable the shared wq. The
- * driver does not allow a value of 0 to be set for
- * threshold via sysfs.
- */
- if (wq->threshold == 0) {
- dev_warn(dev, "Shared WQ and threshold 0.\n");
- mutex_unlock(&wq->wq_lock);
- return -EINVAL;
- }
- }
-
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ resource alloc failed\n");
- return rc;
- }
-
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_enable(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_map_portal(wq);
- if (rc < 0) {
- dev_warn(dev, "wq portal mapping failed: %d\n", rc);
- rc = idxd_wq_disable(wq);
- if (rc < 0)
- dev_warn(dev, "IDXD wq disable failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
-
- wq->client_count = 0;
-
- if (wq->type == IDXD_WQT_KERNEL) {
- rc = idxd_wq_init_percpu_ref(wq);
- if (rc < 0) {
- dev_dbg(dev, "percpu_ref setup failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- if (is_idxd_wq_dmaengine(wq)) {
- rc = idxd_register_dma_channel(wq);
- if (rc < 0) {
- dev_dbg(dev, "DMA channel register failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- } else if (is_idxd_wq_cdev(wq)) {
- rc = idxd_wq_add_cdev(wq);
- if (rc < 0) {
- dev_dbg(dev, "Cdev creation failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- mutex_unlock(&wq->wq_lock);
- dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
-
- return 0;
-}
-
-static int idxd_config_bus_probe(struct device *dev)
-{
- int rc = 0;
- unsigned long flags;
-
- dev_dbg(dev, "%s called\n", __func__);
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY) {
- dev_warn(dev, "Device not ready for config\n");
- return -EBUSY;
- }
-
- if (!try_module_get(THIS_MODULE))
- return -ENXIO;
-
- /* Perform IDXD configuration and enabling */
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device config failed: %d\n", rc);
- return rc;
- }
-
- /* start device */
- rc = idxd_device_enable(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device enable failed: %d\n", rc);
- return rc;
- }
-
- dev_info(dev, "Device %s enabled\n", dev_name(dev));
-
- rc = idxd_register_dma_device(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_dbg(dev, "Failed to register dmaengine device\n");
- return rc;
- }
- return 0;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- return enable_wq(wq);
- }
-
- return -ENODEV;
-}
-
-static void disable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- mutex_lock(&wq->wq_lock);
- dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
- if (wq->state == IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- return;
- }
-
- if (wq->type == IDXD_WQT_KERNEL)
- idxd_wq_quiesce(wq);
-
- if (is_idxd_wq_dmaengine(wq))
- idxd_unregister_dma_channel(wq);
- else if (is_idxd_wq_cdev(wq))
- idxd_wq_del_cdev(wq);
-
- if (idxd_wq_refcount(wq))
- dev_warn(dev, "Clients has claim on wq %d: %d\n",
- wq->id, idxd_wq_refcount(wq));
-
- idxd_wq_unmap_portal(wq);
-
- idxd_wq_drain(wq);
- idxd_wq_reset(wq);
-
- idxd_wq_free_resources(wq);
- wq->client_count = 0;
- mutex_unlock(&wq->wq_lock);
-
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
-}
-
-static void idxd_config_bus_remove(struct device *dev)
-{
- int rc;
-
- dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
-
- /* disable workqueue here */
- if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- disable_wq(wq);
- } else if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
- int i;
-
- dev_dbg(dev, "%s removing dev %s\n", __func__,
- dev_name(&idxd->conf_dev));
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_DISABLED)
- continue;
- dev_warn(dev, "Active wq %d on disable %s.\n", i,
- dev_name(&idxd->conf_dev));
- device_release_driver(&wq->conf_dev);
- }
-
- idxd_unregister_dma_device(idxd);
- rc = idxd_device_disable(idxd);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- mutex_lock(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
- mutex_unlock(&wq->wq_lock);
- }
- }
- module_put(THIS_MODULE);
- if (rc < 0)
- dev_warn(dev, "Device disable failed\n");
- else
- dev_info(dev, "Device %s disabled\n", dev_name(dev));
-
- }
-}
-
-static void idxd_config_bus_shutdown(struct device *dev)
-{
- dev_dbg(dev, "%s called\n", __func__);
-}
-
-struct bus_type dsa_bus_type = {
- .name = "dsa",
- .match = idxd_config_bus_match,
- .probe = idxd_config_bus_probe,
- .remove = idxd_config_bus_remove,
- .shutdown = idxd_config_bus_shutdown,
-};
-
-static struct idxd_device_driver dsa_drv = {
- .drv = {
- .name = "dsa",
- .bus = &dsa_bus_type,
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
-};
-
-/* IDXD generic driver setup */
-int idxd_register_driver(void)
-{
- return driver_register(&dsa_drv.drv);
-}
-
-void idxd_unregister_driver(void)
-{
- driver_unregister(&dsa_drv.drv);
-}
-
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
if (engine->group)
return sysfs_emit(buf, "%d\n", engine->group->id);
@@ -357,8 +32,7 @@ static ssize_t engine_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
struct idxd_device *idxd = engine->idxd;
long id;
int rc;
@@ -412,7 +86,7 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
static void idxd_conf_engine_release(struct device *dev)
{
- struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
kfree(engine);
}
@@ -442,8 +116,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}
@@ -452,8 +125,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -490,8 +162,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}
@@ -500,8 +171,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -535,8 +205,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->use_token_limit);
}
@@ -545,8 +214,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -578,8 +246,7 @@ static struct device_attribute dev_attr_group_use_token_limit =
static ssize_t group_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -607,8 +274,7 @@ static struct device_attribute dev_attr_group_engines =
static ssize_t group_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -637,8 +303,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_a);
}
@@ -647,8 +312,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -663,6 +327,9 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -678,8 +345,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_b);
}
@@ -688,8 +354,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -704,6 +369,9 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -737,7 +405,7 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
static void idxd_conf_group_release(struct device *dev)
{
- struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
kfree(group);
}
@@ -752,7 +420,7 @@ struct device_type idxd_group_device_type = {
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%d\n", wq->client_count);
}
@@ -763,7 +431,7 @@ static struct device_attribute dev_attr_wq_clients =
static ssize_t wq_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->state) {
case IDXD_WQ_DISABLED:
@@ -781,7 +449,7 @@ static struct device_attribute dev_attr_wq_state =
static ssize_t wq_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->group)
return sysfs_emit(buf, "%u\n", wq->group->id);
@@ -793,7 +461,7 @@ static ssize_t wq_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
long id;
int rc;
@@ -836,7 +504,7 @@ static struct device_attribute dev_attr_wq_group_id =
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}
@@ -845,7 +513,7 @@ static ssize_t wq_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -872,7 +540,7 @@ static struct device_attribute dev_attr_wq_mode =
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->size);
}
@@ -895,7 +563,7 @@ static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long size;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -923,7 +591,7 @@ static struct device_attribute dev_attr_wq_size =
static ssize_t wq_priority_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->priority);
}
@@ -932,7 +600,7 @@ static ssize_t wq_priority_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long prio;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -960,7 +628,7 @@ static struct device_attribute dev_attr_wq_priority =
static ssize_t wq_block_on_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
@@ -969,11 +637,14 @@ static ssize_t wq_block_on_fault_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool bof;
int rc;
+ if (!idxd->hw.gen_cap.block_on_fault)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -999,7 +670,7 @@ static struct device_attribute dev_attr_wq_block_on_fault =
static ssize_t wq_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->threshold);
}
@@ -1008,7 +679,7 @@ static ssize_t wq_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
unsigned int val;
int rc;
@@ -1040,7 +711,7 @@ static struct device_attribute dev_attr_wq_threshold =
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->type) {
case IDXD_WQT_KERNEL:
@@ -1059,7 +730,7 @@ static ssize_t wq_type_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
enum idxd_wq_type old_type;
if (wq->state != IDXD_WQ_DISABLED)
@@ -1088,7 +759,7 @@ static struct device_attribute dev_attr_wq_type =
static ssize_t wq_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq->name);
}
@@ -1097,7 +768,7 @@ static ssize_t wq_name_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -1124,7 +795,7 @@ static struct device_attribute dev_attr_wq_name =
static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
int minor = -1;
mutex_lock(&wq->wq_lock);
@@ -1158,7 +829,7 @@ static int __get_sysfs_u64(const char *buf, u64 *val)
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}
@@ -1166,7 +837,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 xfer_size;
int rc;
@@ -1192,7 +863,7 @@ static struct device_attribute dev_attr_wq_max_transfer_size =
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}
@@ -1200,7 +871,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 batch_size;
int rc;
@@ -1225,7 +896,7 @@ static struct device_attribute dev_attr_wq_max_batch_size =
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->ats_dis);
}
@@ -1233,7 +904,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool ats_dis;
int rc;
@@ -1256,6 +927,24 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
static struct device_attribute dev_attr_wq_ats_disable =
__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ u32 occup, offset;
+
+ if (!idxd->hw.wq_cap.occupancy)
+ return -EOPNOTSUPP;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
+ occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
+
+ return sysfs_emit(buf, "%u\n", occup);
+}
+
+static struct device_attribute dev_attr_wq_occupancy =
+ __ATTR(occupancy, 0444, wq_occupancy_show, NULL);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1271,6 +960,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
+ &dev_attr_wq_occupancy.attr,
NULL,
};
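
The new read-only occupancy attribute exposes the 16-bit occupancy field from word WQCFG_OCCUP_IDX of the WQ's configuration block, guarded by the wq_cap.occupancy capability bit; on a configured device it can be read with, e.g., `cat /sys/bus/dsa/devices/wq0.0/occupancy` (path illustrative).
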
@@ -1285,7 +975,7 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
static void idxd_conf_wq_release(struct device *dev)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
kfree(wq->wqcfg);
kfree(wq);
@@ -1301,8 +991,7 @@ struct device_type idxd_wq_device_type = {
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
@@ -1312,8 +1001,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
@@ -1322,8 +1010,7 @@ static DEVICE_ATTR_RO(max_work_queues_size);
static ssize_t max_groups_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
@@ -1332,8 +1019,7 @@ static DEVICE_ATTR_RO(max_groups);
static ssize_t max_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
@@ -1342,8 +1028,7 @@ static DEVICE_ATTR_RO(max_work_queues);
static ssize_t max_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
@@ -1352,8 +1037,7 @@ static DEVICE_ATTR_RO(max_engines);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
@@ -1362,8 +1046,7 @@ static DEVICE_ATTR_RO(numa_node);
static ssize_t max_batch_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
@@ -1373,8 +1056,7 @@ static ssize_t max_transfer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
@@ -1383,8 +1065,7 @@ static DEVICE_ATTR_RO(max_transfer_size);
static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, rc = 0;
for (i = 0; i < 4; i++)
@@ -1399,8 +1080,7 @@ static DEVICE_ATTR_RO(op_cap);
static ssize_t gen_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
@@ -1409,8 +1089,7 @@ static DEVICE_ATTR_RO(gen_cap);
static ssize_t configurable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
@@ -1419,18 +1098,16 @@ static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
- unsigned long flags;
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int count = 0, i;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return sysfs_emit(buf, "%d\n", count);
}
@@ -1439,8 +1116,7 @@ static DEVICE_ATTR_RO(clients);
static ssize_t pasid_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
@@ -1449,12 +1125,10 @@ static DEVICE_ATTR_RO(pasid_enabled);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
switch (idxd->state) {
case IDXD_DEV_DISABLED:
- case IDXD_DEV_CONF_READY:
return sysfs_emit(buf, "disabled\n");
case IDXD_DEV_ENABLED:
return sysfs_emit(buf, "enabled\n");
@@ -1469,15 +1143,13 @@ static DEVICE_ATTR_RO(state);
static ssize_t errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, out = 0;
- unsigned long flags;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
out--;
out += sysfs_emit_at(buf, out, "\n");
return out;
@@ -1487,8 +1159,7 @@ static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
@@ -1497,8 +1168,7 @@ static DEVICE_ATTR_RO(max_tokens);
static ssize_t token_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
@@ -1507,8 +1177,7 @@ static ssize_t token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
unsigned long val;
int rc;
@@ -1536,8 +1205,7 @@ static DEVICE_ATTR_RW(token_limit);
static ssize_t cdev_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->major);
}
@@ -1546,11 +1214,20 @@ static DEVICE_ATTR_RO(cdev_major);
static ssize_t cmd_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
-static DEVICE_ATTR_RO(cmd_status);
+
+static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ idxd->cmd_status = 0;
+ return count;
+}
+static DEVICE_ATTR_RW(cmd_status);
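
Making cmd_status writable gives userspace a way to clear a stale status: the store ignores the written value and simply zeroes the field, so, e.g., `echo 1 > /sys/bus/dsa/devices/dsa0/cmd_status` (path illustrative) resets it.
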
static struct attribute *idxd_device_attributes[] = {
&dev_attr_version.attr,
@@ -1586,7 +1263,7 @@ static const struct attribute_group *idxd_attribute_groups[] = {
static void idxd_conf_device_release(struct device *dev)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
kfree(idxd->wqs);
@@ -1611,12 +1288,12 @@ struct device_type iax_device_type = {
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
+ struct idxd_engine *engine;
int i, j, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = idxd->engines[i];
-
- rc = device_add(&engine->conf_dev);
+ engine = idxd->engines[i];
+ rc = device_add(engine_confdev(engine));
if (rc < 0)
goto cleanup;
}
@@ -1625,22 +1302,26 @@ static int idxd_register_engine_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ for (; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ put_device(engine_confdev(engine));
+ }
- while (j--)
- device_unregister(&idxd->engines[j]->conf_dev);
+ while (j--) {
+ engine = idxd->engines[j];
+ device_unregister(engine_confdev(engine));
+ }
return rc;
}
static int idxd_register_group_devices(struct idxd_device *idxd)
{
+ struct idxd_group *group;
int i, j, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = idxd->groups[i];
-
- rc = device_add(&group->conf_dev);
+ group = idxd->groups[i];
+ rc = device_add(group_confdev(group));
if (rc < 0)
goto cleanup;
}
@@ -1649,22 +1330,26 @@ static int idxd_register_group_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ for (; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
- while (j--)
- device_unregister(&idxd->groups[j]->conf_dev);
+ while (j--) {
+ group = idxd->groups[j];
+ device_unregister(group_confdev(group));
+ }
return rc;
}
static int idxd_register_wq_devices(struct idxd_device *idxd)
{
+ struct idxd_wq *wq;
int i, rc, j;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- rc = device_add(&wq->conf_dev);
+ wq = idxd->wqs[i];
+ rc = device_add(wq_confdev(wq));
if (rc < 0)
goto cleanup;
}
@@ -1673,11 +1358,15 @@ static int idxd_register_wq_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ for (; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ put_device(wq_confdev(wq));
+ }
- while (j--)
- device_unregister(&idxd->wqs[j]->conf_dev);
+ while (j--) {
+ wq = idxd->wqs[j];
+ device_unregister(wq_confdev(wq));
+ }
return rc;
}
@@ -1686,7 +1375,7 @@ int idxd_register_devices(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
int rc, i;
- rc = device_add(&idxd->conf_dev);
+ rc = device_add(idxd_confdev(idxd));
if (rc < 0)
return rc;
@@ -1712,12 +1401,12 @@ int idxd_register_devices(struct idxd_device *idxd)
err_group:
for (i = 0; i < idxd->max_engines; i++)
- device_unregister(&idxd->engines[i]->conf_dev);
+ device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- device_unregister(&idxd->wqs[i]->conf_dev);
+ device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
- device_del(&idxd->conf_dev);
+ device_del(idxd_confdev(idxd));
return rc;
}
@@ -1728,19 +1417,19 @@ void idxd_unregister_devices(struct idxd_device *idxd)
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
- device_unregister(&wq->conf_dev);
+ device_unregister(wq_confdev(wq));
}
for (i = 0; i < idxd->max_engines; i++) {
struct idxd_engine *engine = idxd->engines[i];
- device_unregister(&engine->conf_dev);
+ device_unregister(engine_confdev(engine));
}
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *group = idxd->groups[i];
- device_unregister(&group->conf_dev);
+ device_unregister(group_confdev(group));
}
}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index df7704053d91..e2b5129c5f84 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4319,6 +4319,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long val;
+ int err;
if (!count || count > 11)
return -EINVAL;
@@ -4327,7 +4328,10 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
return -EFAULT;
/* Write a key */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
+
dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
isync();
@@ -4368,7 +4372,7 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long reg, val;
-
+ int err;
#ifdef CONFIG_440SP
/* 440SP uses default 0x14D polynomial only */
return -EINVAL;
@@ -4378,7 +4382,9 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
return -EINVAL;
/* e.g., 0x14D or 0x11D */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
if (val & ~0x1FF)
return -EINVAL;
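
Both stores above replace sscanf("%lx") with kstrtoul(), which rejects trailing garbage and reports overflow instead of silently accepting a partial parse. The idiom:

unsigned long val;
int err;

/* Strict parse in base 16; an optional "0x" prefix is accepted. */
err = kstrtoul(buf, 16, &val);
if (err)
	return err;
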
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
new file mode 100644
index 000000000000..b430edd709f9
--- /dev/null
+++ b/drivers/dma/ptdma/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/ptdma/Makefile
new file mode 100644
index 000000000000..ce5410268a9a
--- /dev/null
+++ b/drivers/dma/ptdma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD Passthru DMA driver
+#
+
+obj-$(CONFIG_AMD_PTDMA) += ptdma.o
+
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o ptdma-debugfs.o
+
+ptdma-$(CONFIG_PCI) += ptdma-pci.o
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/ptdma/ptdma-debugfs.c
new file mode 100644
index 000000000000..c8307d3044a3
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-debugfs.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "ptdma.h"
+
+/* DebugFS helpers */
+#define RI_VERSION_NUM 0x0000003F
+
+#define RI_NUM_VQM 0x00078000
+#define RI_NVQM_SHIFT 15
+
+static int pt_debugfs_info_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+ unsigned int regval;
+
+ seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ regval = ioread32(pt->io_regs + CMD_PT_VERSION);
+
+ seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
+ seq_puts(s, " Engines:");
+ seq_puts(s, "\n");
+ seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+
+ return 0;
+}
+
+/*
+ * Return a formatted buffer containing the current
+ * queue statistics for PTDMA
+ */
+static int pt_debugfs_stats_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+
+ seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
+
+ return 0;
+}
+
+static int pt_debugfs_queue_show(struct seq_file *s, void *p)
+{
+ struct pt_cmd_queue *cmd_q = s->private;
+ unsigned int regval;
+
+ if (!cmd_q)
+ return 0;
+
+ seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
+
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
+
+void ptdma_debugfs_setup(struct pt_device *pt)
+{
+ struct pt_cmd_queue *cmd_q;
+ struct dentry *debugfs_q_instance;
+
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_info_fops);
+
+ debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_stats_fops);
+
+ cmd_q = &pt->cmd_q;
+
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+}
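
The three DEFINE_SHOW_ATTRIBUTE() uses generate the open callback and file_operations for a single-function seq_file, so each debugfs file reduces to a show routine plus one create call. The general shape, with a hypothetical name:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *s, void *unused)
{
	struct pt_device *pt = s->private;	/* data from debugfs_create_file() */

	seq_printf(s, "cmds: %d\n", pt->cmd_count);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */

/* usage: debugfs_create_file("foo", 0400, parent, pt, &foo_fops); */
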
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
new file mode 100644
index 000000000000..8a6bf291a73f
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptdma.h"
+
+/* Human-readable error strings */
+static char *pt_error_codes[] = {
+ "",
+ "ERR 01: ILLEGAL_ENGINE",
+ "ERR 03: ILLEGAL_FUNCTION_TYPE",
+ "ERR 04: ILLEGAL_FUNCTION_MODE",
+ "ERR 06: ILLEGAL_FUNCTION_SIZE",
+ "ERR 08: ILLEGAL_FUNCTION_RSVD",
+ "ERR 09: ILLEGAL_BUFFER_LENGTH",
+ "ERR 10: VLSB_FAULT",
+ "ERR 11: ILLEGAL_MEM_ADDR",
+ "ERR 12: ILLEGAL_MEM_SEL",
+ "ERR 13: ILLEGAL_CONTEXT_ID",
+ "ERR 15: 0xF Reserved",
+ "ERR 18: CMD_TIMEOUT",
+ "ERR 19: IDMA0_AXI_SLVERR",
+ "ERR 20: IDMA0_AXI_DECERR",
+ "ERR 21: 0x15 Reserved",
+ "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+ "ERR 23: IDMA1_AIXI_DECERR",
+ "ERR 24: 0x18 Reserved",
+ "ERR 27: 0x1B Reserved",
+ "ERR 38: ODMA0_AXI_SLVERR",
+ "ERR 39: ODMA0_AXI_DECERR",
+ "ERR 40: 0x28 Reserved",
+ "ERR 41: ODMA1_AXI_SLVERR",
+ "ERR 42: ODMA1_AXI_DECERR",
+ "ERR 43: LSB_PARITY_ERR",
+};
+
+static void pt_log_error(struct pt_device *d, int e)
+{
+	/* CMD_Q_ERROR() yields a 6-bit code; not every value has a table entry */
+	if (e < ARRAY_SIZE(pt_error_codes))
+		dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
+	else
+		dev_err(d->dev, "PTDMA error: (0x%x)\n", e);
+}
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn on the run bit */
+ iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
+}
+
+void pt_stop_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
+}
+
+static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ u32 tail;
+
+	if (soc) {
+		/* Convert a stop-on-completion request into interrupt-on-completion */
+		desc->dw0 |= DWORD0_IOC;
+		desc->dw0 &= ~DWORD0_SOC;
+	}
+ mutex_lock(&cmd_q->q_mutex);
+
+ /* Copy 32-byte command descriptor to hw queue. */
+ memcpy(q_desc, desc, 32);
+ cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
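+	/*
+	 * qidx wraps modulo CMD_Q_LEN (e.g. 31 -> 0 for the 32-entry ring),
+	 * so the tail address written below cycles back to qbase_dma.
+	 */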
+
+ /* The data used by this command must be flushed to memory */
+ wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ iowrite32(tail, cmd_q->reg_control + 0x0004);
+
+ /* Turn the queue back on using our cached control register */
+ pt_start_queue(cmd_q);
+ mutex_unlock(&cmd_q->q_mutex);
+
+ return 0;
+}
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ptdma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dw0 = CMD_DESC_DW0_VAL;
+ desc.length = pt_engine->src_len;
+ desc.src_lo = lower_32_bits(pt_engine->src_dma);
+ desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
+ desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
+ desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
+
+ return pt_core_execute_cmd(&desc, cmd_q);
+}
+
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
+
+static void pt_do_cmd_complete(unsigned long data)
+{
+ struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+ struct pt_cmd *cmd = tdata->cmd;
+ struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+ u32 tail;
+
+ if (cmd_q->cmd_error) {
+ /*
+ * Log the error and flush the queue by
+ * moving the head pointer
+ */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+ iowrite32(tail, cmd_q->reg_control + 0x0008);
+ }
+
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 status;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ status = ioread32(cmd_q->reg_control + 0x0010);
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ /* Acknowledge the interrupt */
+ iowrite32(status, cmd_q->reg_control + 0x0010);
+ pt_core_enable_queue_interrupts(pt);
+ pt_do_cmd_complete((ulong)&pt->tdata);
+ }
+ return IRQ_HANDLED;
+}
+
+int pt_core_init(struct pt_device *pt)
+{
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 dma_addr_lo, dma_addr_hi;
+ struct device *dev = pt->dev;
+ struct dma_pool *dma_pool;
+ int ret;
+
+ /* Allocate a dma pool for the queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));
+
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ PT_DMAPOOL_MAX_SIZE,
+ PT_DMAPOOL_ALIGN, 0);
+ if (!dma_pool)
+ return -ENOMEM;
+
+ /* ptdma core initialisation */
+ iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
+ iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
+ iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
+ iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
+ iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);
+
+ cmd_q->pt = pt;
+ cmd_q->dma_pool = dma_pool;
+ mutex_init(&cmd_q->q_mutex);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase) {
+ dev_err(dev, "unable to allocate command queue\n");
+ ret = -ENOMEM;
+ goto e_dma_alloc;
+ }
+
+ cmd_q->qidx = 0;
+
+ /* Preset some register values */
+ cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;
+
+ /* Turn off the queues and disable interrupts until ready */
+ pt_core_disable_queue_interrupts(pt);
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
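+	/*
+	 * Dummy reads of the queue status registers, presumably to flush
+	 * any stale state before the interrupt status is cleared below
+	 */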
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+	/* Request an irq */
+	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_irq;
+	}
+
+ /* Update the device registers with queue information. */
+ cmd_q->qcontrol &= ~CMD_Q_SIZE;
+ cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);
+
+ dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ pt_core_enable_queue_interrupts(pt);
+
+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ goto e_dmaengine;
+
+ /* Set up debugfs entries */
+ ptdma_debugfs_setup(pt);
+
+ return 0;
+
+e_dmaengine:
+	free_irq(pt->pt_irq, pt);
+
+e_irq:
+	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
+
+e_dma_alloc:
+	dma_pool_destroy(pt->cmd_q.dma_pool);
+
+ return ret;
+}
+
+void pt_core_destroy(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_cmd *cmd;
+
+ /* Unregister the DMA engine */
+ pt_dmaengine_unregister(pt);
+
+ /* Disable and clear interrupts */
+ pt_core_disable_queue_interrupts(pt);
+
+ /* Turn off the run bit */
+ pt_stop_queue(cmd_q);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ free_irq(pt->pt_irq, pt);
+
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+ cmd_q->qbase_dma);
+
+ /* Flush the cmd queue */
+ while (!list_empty(&pt->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->pt_cmd_callback(cmd->data, -ENODEV);
+ }
+}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
new file mode 100644
index 000000000000..c9e52f6f2f50
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include "ptdma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &desc->pt_cmd;
+ pt = pt_cmd->pt;
+ cmd_q = &pt->cmd_q;
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_dma_start_desc(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->issued_to_hw = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_dma_desc *desc;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ pt_cmd = &desc->pt_cmd;
+ pt_cmd->pt = chan->pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ desc->len = len;
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ vchan_issue_pending(&chan->vc);
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_stop_queue(&chan->pt->cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_start_queue(&chan->pt->cmd_q);
+ desc = pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ vchan_get_all_descriptors(&chan->vc, &head);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct pt_dma_chan *chan;
+ struct dma_device *dma_dev = &pt->dma_dev;
+ char *cmd_cache_name;
+ char *desc_cache_name;
+ int ret;
+
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ dev_name(pt->dev));
+ if (!cmd_cache_name)
+ return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ dev_name(pt->dev));
+ if (!desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /*
+ * PTDMA is intended to be used with the AMD NTB devices, hence
+ * marking it as DMA_PRIVATE.
+ */
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+
+ dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(pt->dma_cmd_cache);
+
+ return ret;
+}
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+ kmem_cache_destroy(pt->dma_cmd_cache);
+}
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/ptdma/ptdma-pci.c
new file mode 100644
index 000000000000..22739ff0c3c5
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include "ptdma.h"
+
+struct pt_msix {
+ int msix_count;
+ struct msix_entry msix_entry;
+};
+
+/*
+ * pt_alloc_struct - allocate and initialize the pt_device struct
+ *
+ * @dev: device struct of the PTDMA
+ */
+static struct pt_device *pt_alloc_struct(struct device *dev)
+{
+ struct pt_device *pt;
+
+ pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);
+
+ if (!pt)
+ return NULL;
+ pt->dev = dev;
+
+ INIT_LIST_HEAD(&pt->cmd);
+
+ return pt;
+}
+
+static int pt_get_msix_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pt_msix->msix_entry.entry = 0;
+
+ ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ pt_msix->msix_count = ret;
+
+ pt->pt_irq = pt_msix->msix_entry.vector;
+
+ return 0;
+}
+
+static int pt_get_msi_irq(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ pt->pt_irq = pdev->irq;
+
+ return 0;
+}
+
+static int pt_get_irqs(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ int ret;
+
+ ret = pt_get_msix_irqs(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pt_get_msi_irq(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void pt_free_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pt_msix->msix_count)
+ pci_disable_msix(pdev);
+ else if (pt->pt_irq)
+ pci_disable_msi(pdev);
+
+ pt->pt_irq = 0;
+}
+
+static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pt_device *pt;
+ struct pt_msix *pt_msix;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret = -ENOMEM;
+
+ pt = pt_alloc_struct(dev);
+ if (!pt)
+ goto e_err;
+
+ pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
+ if (!pt_msix)
+ goto e_err;
+
+ pt->pt_msix = pt_msix;
+ pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
+ if (!pt->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ pt->io_regs = iomap_table[pt->dev_vdata->bar];
+ if (!pt->io_regs) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = pt_get_irqs(pt);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
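+	/* Prefer the device's native 48-bit DMA addressing; fall back to 32-bit */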
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, pt);
+
+	/* dev_vdata was validated above, so the core can be initialized directly */
+	ret = pt_core_init(pt);
+	if (ret)
+		goto e_err;
+
+ return 0;
+
+e_err:
+ dev_err(dev, "initialization failed ret = %d\n", ret);
+
+ return ret;
+}
+
+static void pt_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pt_device *pt = dev_get_drvdata(dev);
+
+ if (!pt)
+ return;
+
+ if (pt->dev_vdata)
+ pt_core_destroy(pt);
+
+ pt_free_irqs(pt);
+}
+
+static const struct pt_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+ },
+};
+
+static const struct pci_device_id pt_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pt_pci_table);
+
+static struct pci_driver pt_pci_driver = {
+ .name = "ptdma",
+ .id_table = pt_pci_table,
+ .probe = pt_pci_probe,
+ .remove = pt_pci_remove,
+};
+
+module_pci_driver(pt_pci_driver);
+
+MODULE_AUTHOR("Sanjay R Mehta <sanju.mehta@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD PassThru DMA driver");
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
new file mode 100644
index 000000000000..afbf192c9230
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#ifndef __PT_DEV_H__
+#define __PT_DEV_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+
+#include "../virt-dma.h"
+
+#define MAX_PT_NAME_LEN 16
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 1
+#define MAX_CMD_QLEN 100
+
+#define PT_ENGINE_PASSTHRU 5
+
+/* Register Mappings */
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+
+#define CMD_QUEUE_PRIO_OFFSET 0x00
+#define CMD_REQID_CONFIG_OFFSET 0x04
+#define CMD_TIMEOUT_OFFSET 0x08
+#define CMD_PT_VERSION 0x10
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+#define CMD_Q_DMA_STATUS_BASE 0x0108
+#define CMD_Q_DMA_READ_STATUS_BASE 0x010C
+#define CMD_Q_DMA_WRITE_STATUS_BASE 0x0110
+#define CMD_Q_ABORT_BASE 0x0114
+#define CMD_Q_AX_CACHE_BASE 0x0118
+
+#define CMD_CONFIG_OFFSET 0x1120
+#define CMD_CLK_GATE_CTL_OFFSET 0x6004
+
+#define CMD_DESC_DW0_VAL 0x500012
+
+/* Address offset for virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_CONFIG_REQID 0
+#define CMD_TIMEOUT_DISABLE 0
+#define CMD_CLK_DYN_GATING_DIS 0
+#define CMD_CLK_SW_GATE_MODE 0
+#define CMD_CLK_GATE_CTL 0
+#define CMD_QUEUE_PRIO GENMASK(2, 1)
+#define CMD_CONFIG_VHB_EN BIT(0)
+#define CMD_CLK_DYN_GATING_EN BIT(0)
+#define CMD_CLK_HW_GATE_MODE BIT(0)
+#define CMD_CLK_GATE_ON_DELAY BIT(12)
+#define CMD_CLK_GATE_OFF_DELAY BIT(12)
+
+#define CMD_CLK_GATE_CONFIG (CMD_CLK_GATE_CTL | \
+ CMD_CLK_HW_GATE_MODE | \
+ CMD_CLK_GATE_ON_DELAY | \
+ CMD_CLK_DYN_GATING_EN | \
+ CMD_CLK_GATE_OFF_DELAY)
+
+#define CMD_Q_LEN 32
+#define CMD_Q_RUN BIT(0)
+#define CMD_Q_HALT BIT(1)
+#define CMD_Q_MEM_LOCATION BIT(2)
+#define CMD_Q_SIZE_MASK GENMASK(4, 0)
+#define CMD_Q_SIZE GENMASK(7, 3)
+#define CMD_Q_SHIFT GENMASK(1, 0)
+#define QUEUE_SIZE_VAL ((ffs(CMD_Q_LEN) - 2) & \
+ CMD_Q_SIZE_MASK)
+#define Q_PTR_MASK ((2 << (QUEUE_SIZE_VAL + 5)) - 1)
+#define Q_DESC_SIZE sizeof(struct ptdma_desc)
+#define Q_SIZE(n) (CMD_Q_LEN * (n))
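+/*
+ * Worked values: CMD_Q_LEN == 32, so ffs(32) - 2 == 4 and QUEUE_SIZE_VAL is
+ * 4; Q_SIZE(Q_DESC_SIZE) is 32 descriptors * 32 bytes == 1024 bytes of ring.
+ */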
+
+#define INT_COMPLETION BIT(0)
+#define INT_ERROR BIT(1)
+#define INT_QUEUE_STOPPED BIT(2)
+#define INT_EMPTY_QUEUE BIT(3)
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
+
+/****** Local Storage Block ******/
+#define LSB_START 0
+#define LSB_END 127
+#define LSB_COUNT (LSB_END - LSB_START + 1)
+
+#define PT_DMAPOOL_MAX_SIZE 64
+#define PT_DMAPOOL_ALIGN BIT(5)
+
+#define PT_PASSTHRU_BLOCKSIZE 512
+
+struct pt_device;
+
+struct pt_tasklet_data {
+ struct completion completion;
+ struct pt_cmd *cmd;
+};
+
+/*
+ * struct pt_passthru_engine - pass-through operation
+ *   without performing DMA mapping
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src_dma: data to be used for this operation
+ * @dst_dma: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * The caller of pt_core_perform_passthru() must set src_dma, dst_dma
+ * and src_len.
+ */
+struct pt_passthru_engine {
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+};
+
+/*
+ * struct pt_cmd - PTDMA operation request
+ * @entry: list element
+ * @work: work element used for callbacks
+ * @pt: PT device to be run on
+ * @ret: operation return code
+ * @engine: PTDMA operation to perform (passthru)
+ * @engine_error: PT engine return code
+ * @passthru: engine specific structures, refer to specific engine struct below
+ * @pt_cmd_callback: operation completion callback function
+ * @data: parameter value to be supplied to the callback function
+ *
+ * The submitter must set engine, pt_cmd_callback and the fields required
+ * by the engine-specific structure below.
+ */
+struct pt_cmd {
+ struct list_head entry;
+ struct work_struct work;
+ struct pt_device *pt;
+ int ret;
+ u32 engine;
+ u32 engine_error;
+ struct pt_passthru_engine passthru;
+ /* Completion callback support */
+ void (*pt_cmd_callback)(void *data, int err);
+ void *data;
+};
+
+struct pt_dma_desc {
+ struct virt_dma_desc vd;
+ struct pt_device *pt;
+ enum dma_status status;
+ size_t len;
+ bool issued_to_hw;
+ struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_chan {
+ struct virt_dma_chan vc;
+ struct pt_device *pt;
+};
+
+struct pt_cmd_queue {
+ struct pt_device *pt;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+	/* Queue base address (not necessarily aligned) */
+ struct ptdma_desc *qbase;
+
+	/* Queue mutex to serialize descriptor submission */
+	struct mutex q_mutex ____cacheline_aligned;
+ unsigned int qidx;
+
+ unsigned int qsize;
+ dma_addr_t qbase_dma;
+ dma_addr_t qdma_tail;
+
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Register addresses for queue */
+ void __iomem *reg_control;
+ u32 qcontrol; /* Cached control register */
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+ /* Queue Statistics */
+ unsigned long total_pt_ops;
+} ____cacheline_aligned;
+
+struct pt_device {
+ struct list_head entry;
+
+ unsigned int ord;
+ char name[MAX_PT_NAME_LEN];
+
+ struct device *dev;
+
+ /* Bus specific device information */
+ struct pt_msix *pt_msix;
+
+ struct pt_dev_vdata *dev_vdata;
+
+ unsigned int pt_irq;
+
+ /* I/O area used for device communication */
+ void __iomem *io_regs;
+
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+
+	/*
+	 * The command queue. This represents the single hardware queue
+	 * that the PTDMA makes available for processing cmds.
+	 */
+ struct pt_cmd_queue cmd_q;
+
+ /* Support for the DMA Engine capabilities */
+ struct dma_device dma_dev;
+ struct pt_dma_chan *pt_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
+ wait_queue_head_t lsb_queue;
+
+ /* Device Statistics */
+ unsigned long total_interrupts;
+
+ struct pt_tasklet_data tdata;
+};
+
+/*
+ * descriptor for PTDMA commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: reserved 32 bits
+ * word 7: reserved 32 bits
+ */
+
+#define DWORD0_SOC BIT(0)
+#define DWORD0_IOC BIT(1)
+
+struct dword3 {
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
+};
+
+struct dword5 {
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
+};
+
+struct ptdma_desc {
+ u32 dw0;
+ u32 length;
+ u32 src_lo;
+ struct dword3 dw3;
+ u32 dst_lo;
+ struct dword5 dw5;
+ __le32 rsvd1;
+ __le32 rsvd2;
+};
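+/*
+ * The queue code copies exactly 32 bytes per descriptor (see the memcpy()
+ * in ptdma-dev.c), so the layout above must pack into eight 32-bit words;
+ * a build-time check of that assumption:
+ */
+static_assert(sizeof(struct ptdma_desc) == 8 * sizeof(u32));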
+
+/* Structure to hold PT device data */
+struct pt_dev_vdata {
+ const unsigned int bar;
+};
+
+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
+void ptdma_debugfs_setup(struct pt_device *pt);
+int pt_core_init(struct pt_device *pt);
+void pt_core_destroy(struct pt_device *pt);
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine);
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q);
+void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+
+#endif
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 13437323a85b..a46296285307 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,3 +47,12 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
+
+config RZ_DMAC
+ tristate "Renesas RZ/G2L DMA Controller"
+ depends on ARCH_R9A07G044 || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the general purpose DMA controller found in the
+ Renesas RZ/G2L SoC variants.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index abdf10341725..360ab6d25e76 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
new file mode 100644
index 000000000000..f9f30cbeccbe
--- /dev/null
+++ b/drivers/dma/sh/rz-dmac.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L DMA Controller Driver
+ *
+ * Based on imx-dma.c
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+enum rz_dmac_prep_type {
+ RZ_DMAC_DESC_MEMCPY,
+ RZ_DMAC_DESC_SLAVE_SG,
+};
+
+struct rz_lmdesc {
+ u32 header;
+ u32 sa;
+ u32 da;
+ u32 tb;
+ u32 chcfg;
+ u32 chitvl;
+ u32 chext;
+ u32 nxla;
+};
+
+struct rz_dmac_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t src;
+ dma_addr_t dest;
+ size_t len;
+ struct list_head node;
+ enum dma_transfer_direction direction;
+ enum rz_dmac_prep_type type;
+ /* For slave sg */
+ struct scatterlist *sg;
+ unsigned int sgcount;
+};
+
+#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
+
+struct rz_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *ch_base;
+ void __iomem *ch_cmn_base;
+ unsigned int index;
+ int irq;
+ struct rz_dmac_desc *desc;
+ int descs_allocated;
+
+ enum dma_slave_buswidth src_word_size;
+ enum dma_slave_buswidth dst_word_size;
+ dma_addr_t src_per_address;
+ dma_addr_t dst_per_address;
+
+ u32 chcfg;
+ u32 chctrl;
+ int mid_rid;
+
+ struct list_head ld_free;
+ struct list_head ld_queue;
+ struct list_head ld_active;
+
+ struct {
+ struct rz_lmdesc *base;
+ struct rz_lmdesc *head;
+ struct rz_lmdesc *tail;
+ dma_addr_t base_dma;
+ } lmdesc;
+};
+
+#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+
+struct rz_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *ext_base;
+
+ unsigned int n_channels;
+ struct rz_dmac_chan *channels;
+
+ DECLARE_BITMAP(modules, 1024);
+};
+
+#define to_rz_dmac(d) container_of(d, struct rz_dmac, engine)
+
+/*
+ * -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define CHSTAT 0x0024
+#define CHCTRL 0x0028
+#define CHCFG 0x002c
+#define NXLA 0x0038
+
+#define DCTRL 0x0000
+
+#define EACH_CHANNEL_OFFSET 0x0040
+#define CHANNEL_0_7_OFFSET 0x0000
+#define CHANNEL_0_7_COMMON_BASE 0x0300
+#define CHANNEL_8_15_OFFSET 0x0400
+#define CHANNEL_8_15_COMMON_BASE 0x0700
+
+#define CHSTAT_ER BIT(4)
+#define CHSTAT_EN BIT(0)
+
+#define CHCTRL_CLRINTMSK BIT(17)
+#define CHCTRL_CLRSUS BIT(9)
+#define CHCTRL_CLRTC BIT(6)
+#define CHCTRL_CLREND BIT(5)
+#define CHCTRL_CLRRQ BIT(4)
+#define CHCTRL_SWRST BIT(3)
+#define CHCTRL_STG BIT(2)
+#define CHCTRL_CLREN BIT(1)
+#define CHCTRL_SETEN BIT(0)
+#define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
+ CHCTRL_CLRTC | CHCTRL_CLREND | \
+ CHCTRL_CLRRQ | CHCTRL_SWRST | \
+ CHCTRL_CLREN)
+
+#define CHCFG_DMS BIT(31)
+#define CHCFG_DEM BIT(24)
+#define CHCFG_DAD BIT(21)
+#define CHCFG_SAD BIT(20)
+#define CHCFG_REQD BIT(3)
+#define CHCFG_SEL(bits) ((bits) & 0x07)
+#define CHCFG_MEM_COPY (0x80400008)
+#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
+#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
+#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
+#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
+#define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5)
+
+#define MID_RID_MASK GENMASK(9, 0)
+#define CHCFG_MASK GENMASK(15, 10)
+#define CHCFG_DS_INVALID 0xFF
+#define DCTRL_LVINT BIT(1)
+#define DCTRL_PR BIT(0)
+#define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR)
+
+/* LINK MODE DESCRIPTOR */
+#define HEADER_LV BIT(0)
+
+#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
+#define RZ_DMAC_MAX_CHANNELS 16
+#define DMAC_NR_LMDESC 64
+
+/*
+ * -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->base + offset);
+}
+
+static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->ext_base + offset);
+}
+
+static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
+{
+ return readl(dmac->ext_base + offset);
+}
+
+static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
+ unsigned int offset, int which)
+{
+ if (which)
+ writel(val, channel->ch_base + offset);
+ else
+ writel(val, channel->ch_cmn_base + offset);
+}
+
+static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
+ unsigned int offset, int which)
+{
+ if (which)
+ return readl(channel->ch_base + offset);
+ else
+ return readl(channel->ch_cmn_base + offset);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
+ struct rz_lmdesc *lmdesc)
+{
+ u32 nxla;
+
+ channel->lmdesc.base = lmdesc;
+ channel->lmdesc.head = lmdesc;
+ channel->lmdesc.tail = lmdesc;
+ nxla = channel->lmdesc.base_dma;
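+	/* Link each descriptor's nxla to its successor; the last wraps to base */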
+ while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
+ lmdesc->header = 0;
+ nxla += sizeof(*lmdesc);
+ lmdesc->nxla = nxla;
+ lmdesc++;
+ }
+
+ lmdesc->header = 0;
+ lmdesc->nxla = channel->lmdesc.base_dma;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
+{
+ struct rz_lmdesc *lmdesc = channel->lmdesc.head;
+
+ while (!(lmdesc->header & HEADER_LV)) {
+ lmdesc->header = 0;
+ lmdesc++;
+ if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+ channel->lmdesc.head = lmdesc;
+}
+
+static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+ u32 nxla;
+ u32 chctrl;
+ u32 chstat;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+
+ rz_dmac_lmdesc_recycle(channel);
+
+ nxla = channel->lmdesc.base_dma +
+ (sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
+ channel->lmdesc.base));
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (!(chstat & CHSTAT_EN)) {
+ chctrl = (channel->chctrl | CHCTRL_SETEN);
+ rz_dmac_ch_writel(channel, nxla, NXLA, 1);
+ rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
+ rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ local_irq_restore(flags);
+}
+
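+/*
+ * Each 32-bit DMARS register packs the 16-bit request-select values for two
+ * channels: even channels in bits [15:0], odd channels in bits [31:16].
+ */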
+static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
+{
+ u32 dmars_offset = (nr / 2) * 4;
+ u32 shift = (nr % 2) * 16;
+ u32 dmars32;
+
+ dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
+ dmars32 &= ~(0xffff << shift);
+ dmars32 |= dmars << shift;
+
+ rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
+}
+
+static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
+ struct rz_dmac_desc *d = channel->desc;
+ u32 chcfg = CHCFG_MEM_COPY;
+
+ /* prepare descriptor */
+ lmdesc->sa = d->src;
+ lmdesc->da = d->dest;
+ lmdesc->tb = d->len;
+ lmdesc->chcfg = chcfg;
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->header = HEADER_LV;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+
+ channel->chcfg = chcfg;
+ channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
+}
+
+static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *d = channel->desc;
+ struct scatterlist *sg, *sgl = d->sg;
+ struct rz_lmdesc *lmdesc;
+ unsigned int i, sg_len = d->sgcount;
+
+ channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;
+
+ if (d->direction == DMA_DEV_TO_MEM) {
+ channel->chcfg |= CHCFG_SAD;
+ channel->chcfg &= ~CHCFG_REQD;
+ } else {
+ channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
+ }
+
+ lmdesc = channel->lmdesc.tail;
+
+ for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
+ if (d->direction == DMA_DEV_TO_MEM) {
+ lmdesc->sa = channel->src_per_address;
+ lmdesc->da = sg_dma_address(sg);
+ } else {
+ lmdesc->sa = sg_dma_address(sg);
+ lmdesc->da = channel->dst_per_address;
+ }
+
+ lmdesc->tb = sg_dma_len(sg);
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+		/* Unmask the end-of-transfer interrupt (DEM) only on the last chunk */
+		if (i == (sg_len - 1))
+			lmdesc->chcfg = channel->chcfg & ~CHCFG_DEM;
+		else
+			lmdesc->chcfg = channel->chcfg;
+		lmdesc->header = HEADER_LV;
+ if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+
+ channel->lmdesc.tail = lmdesc;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ channel->chctrl = CHCTRL_SETEN;
+}
+
+static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
+{
+ struct rz_dmac_desc *d = chan->desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd)
+ return 0;
+
+ list_del(&vd->node);
+
+ switch (d->type) {
+ case RZ_DMAC_DESC_MEMCPY:
+ rz_dmac_prepare_desc_for_memcpy(chan);
+ break;
+
+ case RZ_DMAC_DESC_SLAVE_SG:
+ rz_dmac_prepare_descs_for_slave_sg(chan);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ rz_dmac_enable_hw(chan);
+
+ return 0;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
+ struct rz_dmac_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ list_add_tail(&desc->node, &channel->ld_free);
+ channel->descs_allocated++;
+ }
+
+ if (!channel->descs_allocated)
+ return -ENOMEM;
+
+ return channel->descs_allocated;
+}
+
+static void rz_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.base;
+ struct rz_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
+ rz_dmac_disable_hw(channel);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+
+ if (channel->mid_rid >= 0) {
+ clear_bit(channel->mid_rid, dmac->modules);
+ channel->mid_rid = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
+ kfree(desc);
+ channel->descs_allocated--;
+ }
+
+ INIT_LIST_HEAD(&channel->ld_free);
+ vchan_free_chan_resources(&channel->vc);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+
+ dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
+ __func__, channel->index, &src, &dest, len);
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ desc->type = RZ_DMAC_DESC_MEMCPY;
+ desc->src = src;
+ desc->dest = dest;
+ desc->len = len;
+ desc->direction = DMA_MEM_TO_MEM;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac_desc *desc;
+ struct scatterlist *sg;
+ int dma_length = 0;
+ int i = 0;
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+	for_each_sg(sgl, sg, sg_len, i)
+		dma_length += sg_dma_len(sg);
+
+ desc->type = RZ_DMAC_DESC_SLAVE_SG;
+ desc->sg = sgl;
+ desc->sgcount = sg_len;
+ desc->len = dma_length;
+ desc->direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->src = channel->src_per_address;
+ else
+ desc->dest = channel->dst_per_address;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static int rz_dmac_terminate_all(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ rz_dmac_disable_hw(channel);
+ spin_lock_irqsave(&channel->vc.lock, flags);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+ vchan_get_all_descriptors(&channel->vc, &head);
+ vchan_dma_desc_free_list(&channel->vc, &head);
+
+ return 0;
+}
+
+static void rz_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue,
+ struct rz_dmac_desc, node);
+ channel->desc = desc;
+ if (vchan_issue_pending(&channel->vc)) {
+ if (rz_dmac_xfer_desc(channel) < 0)
+ dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
+ channel->index);
+ else
+ list_move_tail(channel->ld_queue.next,
+ &channel->ld_active);
+ }
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+}
+
+static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
+{
+ u8 i;
+ const enum dma_slave_buswidth ds_lut[] = {
+ DMA_SLAVE_BUSWIDTH_1_BYTE,
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_SLAVE_BUSWIDTH_8_BYTES,
+ DMA_SLAVE_BUSWIDTH_16_BYTES,
+ DMA_SLAVE_BUSWIDTH_32_BYTES,
+ DMA_SLAVE_BUSWIDTH_64_BYTES,
+ DMA_SLAVE_BUSWIDTH_128_BYTES,
+ };
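+	/* The LUT index doubles as the DDS/SDS encoding programmed via CHCFG_FILL_*() */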
+
+ for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
+ if (ds_lut[i] == ds)
+ return i;
+ }
+
+ return CHCFG_DS_INVALID;
+}
+
+static int rz_dmac_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ u32 val;
+
+ channel->src_per_address = config->src_addr;
+ channel->src_word_size = config->src_addr_width;
+ channel->dst_per_address = config->dst_addr;
+ channel->dst_word_size = config->dst_addr_width;
+
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_DDS(val);
+
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_SDS(val);
+
+ return 0;
+}
+
+static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ /*
+ * Place holder
+ * Descriptor allocation is done during alloc_chan_resources and
+ * get freed during free_chan_resources.
+ * list is used to manage the descriptors and avoid any memory
+ * allocation/free during DMA read/write.
+ */
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat, chctrl;
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+	if (chstat & CHSTAT_ER) {
+		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
+			channel->index, chstat);
+		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+		return;
+	}
+
+	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
+	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
+}
+
+static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+
+ if (channel) {
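+		/* Ack the channel in hard IRQ context; completion runs in the thread */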
+ rz_dmac_irq_handle_channel(channel);
+ return IRQ_WAKE_THREAD;
+ }
+ /* handle DMAERR irq */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+ struct rz_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (list_empty(&channel->ld_active)) {
+ /* Someone might have called terminate all */
+ goto out;
+ }
+
+ desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
+ vchan_cookie_complete(&desc->vd);
+ list_move_tail(channel->ld_active.next, &channel->ld_free);
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
+ node);
+ channel->desc = desc;
+ if (rz_dmac_xfer_desc(channel) == 0)
+ list_move_tail(channel->ld_queue.next, &channel->ld_active);
+ }
+out:
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
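+/*
+ * The single DT cell encodes both the request line and the channel
+ * configuration: bits [9:0] hold the MID/RID value written to DMARS, and
+ * bits [15:10] hold the TM/AM/LVL/HIEN bits that the CHCFG_FILL_*() macros
+ * spread into CHCFG.
+ */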
+static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct of_phandle_args *dma_spec = arg;
+ u32 ch_cfg;
+
+ channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
+ ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
+ channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
+ CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);
+
+ return !test_and_set_bit(channel->mid_rid, dmac->modules);
+}
+
+static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rz_dmac_chan_probe(struct rz_dmac *dmac,
+ struct rz_dmac_chan *channel,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ struct rz_lmdesc *lmdesc;
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ channel->index = index;
+ channel->mid_rid = -EINVAL;
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (channel->irq < 0)
+ return channel->irq;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dmac->dev, channel->irq,
+ rz_dmac_irq_handler,
+ rz_dmac_irq_handler_thread, 0,
+ irqname, channel);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ channel->irq, ret);
+ return ret;
+ }
+
+ /* Set io base address for each channel */
+ if (index < 8) {
+ channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
+ EACH_CHANNEL_OFFSET * index;
+ channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
+ } else {
+ channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
+ EACH_CHANNEL_OFFSET * (index - 8);
+ channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
+ }
+
+ /* Allocate descriptors */
+ lmdesc = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ &channel->lmdesc.base_dma, GFP_KERNEL);
+ if (!lmdesc) {
+ dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
+ return -ENOMEM;
+ }
+ rz_lmdesc_setup(channel, lmdesc);
+
+ /* Initialize register for each channel */
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+
+ channel->vc.desc_free = rz_dmac_virt_desc_free;
+ vchan_init(&channel->vc, &dmac->engine);
+ INIT_LIST_HEAD(&channel->ld_queue);
+ INIT_LIST_HEAD(&channel->ld_free);
+ INIT_LIST_HEAD(&channel->ld_active);
+
+ return 0;
+}
+
+static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_dmac_probe(struct platform_device *pdev)
+{
+ const char *irqname = "error";
+ struct dma_device *engine;
+ struct rz_dmac *dmac;
+ int channel_num;
+ unsigned int i;
+ int ret;
+ int irq;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = rz_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources */
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+
+ /* Register interrupt handler for error */
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
+ irqname, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Register the DMA engine device. */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
+
+ engine->dev = &pdev->dev;
+
+ engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rz_dmac_free_chan_resources;
+ engine->device_tx_status = dma_cookie_status;
+ engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
+ engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
+ engine->device_config = rz_dmac_config;
+ engine->device_terminate_all = rz_dmac_terminate_all;
+ engine->device_issue_pending = rz_dmac_issue_pending;
+
+ engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma_set_max_seg_size(engine->dev, U32_MAX);
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto dma_register_err;
+ }
+ return 0;
+
+dma_register_err:
+ of_dma_controller_free(pdev->dev.of_node);
+err:
+	/* Channels [0, i) were fully initialized before the failure */
+	channel_num = i;
+	for (i = 0; i < channel_num; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+
+ return ret;
+}
+
+static int rz_dmac_remove(struct platform_device *pdev)
+{
+ struct rz_dmac *dmac = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ return 0;
+}
+
+static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,rz-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rz_dmac_match);
+
+static struct platform_driver rz_dmac_driver = {
+ .driver = {
+ .name = "rz-dmac",
+ .of_match_table = of_rz_dmac_match,
+ },
+ .probe = rz_dmac_probe,
+ .remove = rz_dmac_remove,
+};
+
+module_platform_driver(rz_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1cc06900153e..5edaeb89d1e6 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -466,7 +466,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc,
- int sg_index)
+ unsigned int sg_index)
{
struct usb_dmac_sg *sg = desc->sg + sg_index;
u32 mem_addr = sg->mem_addr & 0xffffffff;
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 0ef5ca81ba4d..4357d2395e6b 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
{ .compatible = "sprd,sc9860-dma", },
{},
};
+MODULE_DEVICE_TABLE(of, sprd_dma_match);
static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 7dd1d3d0bf06..9063c727962e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -60,6 +60,7 @@
#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
+#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
@@ -138,8 +139,9 @@
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
-#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
- >> 2)
+#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
+#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
+#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -1252,6 +1254,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
+ if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
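The new TRBUFF bit is driven by bit 4 of the "features" cell a client passes in its dma-cells. A small decoding sketch, assuming it is compiled inside stm32-dma.c where the masks above are visible:

static void stm32_dma_show_features(u32 features)
{
	u32 fifo_thr = STM32_DMA_THRESHOLD_FTR_GET(features);	/* bits 1:0 */
	bool direct  = STM32_DMA_DIRECT_MODE_GET(features);	/* bit 2 */
	bool alt_ack = STM32_DMA_ALT_ACK_MODE_GET(features);	/* bit 4 */

	pr_info("FIFO threshold %u, direct mode %d, bufferable (TRBUFF) %d\n",
		fifo_thr, direct, alt_ack);
}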
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 4735742e826d..b1115a6d1935 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -655,9 +655,8 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
return ret;
}
- ret = pm_runtime_get_sync(tdc2dev(tdc));
+ ret = pm_runtime_resume_and_get(tdc2dev(tdc));
if (ret < 0) {
- pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -869,10 +868,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ if (ret < 0)
goto rpm_disable;
- }
ret = tegra_adma_init(tdma);
if (ret)
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 7580870ed746..34e3fc565a37 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -58,6 +58,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721e_src_ep_map[] = {
/* SA2UL */
@@ -138,6 +146,71 @@ static struct psil_ep j721e_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* CPSW9 */
PSIL_ETHERNET(0x4a00),
/* CPSW0 */
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4b9530a7bf65..a4450bc95466 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1420,8 +1420,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
chan->desc_submitcount++;
chan->desc_pendingcount--;
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
+ list_move_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
@@ -1659,6 +1658,17 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_device_config - Configure the DMA channel
+ * @dchan: DMA channel
+ * @config: channel configuration
+ */
+static int xilinx_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ return 0;
+}
+
+/**
* xilinx_dma_complete_descriptor - Mark the active descriptor as complete
* @chan : xilinx DMA channel
*
@@ -3077,7 +3087,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
+ dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3096,6 +3106,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_synchronize = xilinx_dma_synchronize;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
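The empty device_config callback looks odd but is deliberate: the dmaengine core returns -ENOSYS from dmaengine_slave_config() when device_config is NULL, so generic clients that configure their channel unconditionally would fail even though VDMA needs no slave configuration. A sketch of such a client path:

#include <linux/dmaengine.h>

static int example_client_setup(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
	};

	/* Returned -ENOSYS before this patch, 0 afterwards. */
	return dmaengine_slave_config(chan, &cfg);
}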
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 5fecf5aa6e85..97f02f8eb03a 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,8 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
- list_del(&sdesc->node);
- list_add_tail(&sdesc->node, &chan->free_list);
+ list_move_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
list_move_tail(&child->node, &chan->free_list);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 715e491dfbc3..4c3fd2eed1da 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -488,9 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
- struct fwnet_device *dev;
int status;
- __be64 guid;
switch (ether_type) {
case ETH_P_ARP:
@@ -503,7 +501,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
goto err;
}
- dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_NONE;
@@ -512,7 +509,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet-like pseudo frame header.
*/
- guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 4d5421d14a41..940ddf916202 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -73,6 +73,10 @@ static void ascii_filter(char *d, const char *s)
static ssize_t get_modalias(char *buffer, size_t buffer_size)
{
+ /*
+	 * Note: new fields need to be added at the end to keep compatibility
+	 * with udev's hwdb, which matches on "`cat dmi/id/modalias`*".
+ */
static const struct mafield {
const char *prefix;
int field;
@@ -85,13 +89,13 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size)
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
- { "sku", DMI_PRODUCT_SKU },
{ "rvn", DMI_BOARD_VENDOR },
{ "rn", DMI_BOARD_NAME },
{ "rvr", DMI_BOARD_VERSION },
{ "cvn", DMI_CHASSIS_VENDOR },
{ "ct", DMI_CHASSIS_TYPE },
{ "cvr", DMI_CHASSIS_VERSION },
+ { "sku", DMI_PRODUCT_SKU },
{ NULL, DMI_NONE }
};
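The reorder matters because udev's hwdb patterns are globs over the whole modalias string, usually ending in '*'; appending new fields keeps existing patterns matching, while inserting one in the middle would break them. A standalone userspace sketch with hypothetical strings:

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* A pre-existing hwdb glob, written before "sku" was exported. */
	const char *old_glob = "dmi:*svnACME:pnWidget:*";
	/* A modalias with the new field appended at the end. */
	const char *alias = "dmi:bvnACME:bvr1:bd01/01/21:svnACME:pnWidget:"
			    "pvr1:rvnACME:rn1:rvr1:cvnACME:ct10:cvr1:skuX:";

	printf("still matches: %d\n", fnmatch(old_glob, alias, 0) == 0);
	return 0;
}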
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index ae87dded989d..d489bdc645fe 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -7,7 +7,7 @@
* Copyright 2011 Intel Corporation; author Matt Fleming
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/ctype.h>
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
index 1088e288c04d..71c71c222346 100644
--- a/drivers/firmware/efi/libstub/vsprintf.c
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -10,7 +10,7 @@
* Oh, it's a waste of space, but oh-so-yummy for debugging.
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 612a59e213df..6e9788324fea 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -86,10 +86,6 @@ MODULE_VERSION(IBFT_ISCSI_VERSION);
static struct acpi_table_ibft *ibft_addr;
-#ifndef CONFIG_ISCSI_IBFT_FIND
-phys_addr_t ibft_phys_addr;
-#endif
-
struct ibft_hdr {
u8 id;
u8 version;
@@ -851,7 +847,21 @@ static void __init acpi_find_ibft_region(void)
{
}
#endif
-
+#ifdef CONFIG_ISCSI_IBFT_FIND
+static int __init acpi_find_isa_region(void)
+{
+ if (ibft_phys_addr) {
+ ibft_addr = isa_bus_to_virt(ibft_phys_addr);
+ return 0;
+ }
+ return -ENODEV;
+}
+#else
+static int __init acpi_find_isa_region(void)
+{
+ return -ENODEV;
+}
+#endif
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -864,9 +874,7 @@ static int __init ibft_init(void)
is called before ACPI tables are parsed and it only does
legacy finding.
*/
- if (ibft_phys_addr)
- ibft_addr = isa_bus_to_virt(ibft_phys_addr);
- else
+ if (acpi_find_isa_region())
acpi_find_ibft_region();
if (ibft_addr) {
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index ced1964faf42..2ee97bab7440 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1147,6 +1147,64 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+bool qcom_scm_lmh_dcvsh_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
+
+int qcom_scm_lmh_profile_change(u32 profile_id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ .args[0] = profile_id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version)
+{
+ dma_addr_t payload_phys;
+ u32 *payload_buf;
+ int ret, payload_size = 5 * sizeof(u32);
+
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[1] = payload_size,
+ .args[2] = limit_node,
+ .args[3] = node_id,
+ .args[4] = version,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ if (!payload_buf)
+ return -ENOMEM;
+
+ payload_buf[0] = payload_fn;
+ payload_buf[1] = 0;
+ payload_buf[2] = payload_reg;
+ payload_buf[3] = 1;
+ payload_buf[4] = payload_val;
+
+ desc.args[0] = payload_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
+
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
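A hedged sketch of how a thermal or cpufreq driver might call the new LMH helpers; all IDs and payload encodings below are hypothetical, since their meaning is defined by firmware:

#include <linux/errno.h>
#include <linux/qcom_scm.h>

static int example_enable_lmh(void)
{
	int ret;

	if (!qcom_scm_lmh_dcvsh_available())
		return -EOPNOTSUPP;

	ret = qcom_scm_lmh_profile_change(0x1);	/* hypothetical profile id */
	if (ret)
		return ret;

	/* payload_fn/payload_reg/payload_val are firmware-defined encodings */
	return qcom_scm_lmh_dcvsh(0x10, 0, 95000, 0x20, 0, 0x10000);
}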
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 632fe3142462..d92156ceb3ac 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -114,6 +114,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
+#define QCOM_SCM_SVC_LMH 0x13
+#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01
+#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
+
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 81abd890b364..fae5141251e5 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1018,12 +1018,6 @@ config GPIO_MAX732X_IRQ
Say yes here to enable the max732x to be used as an interrupt
controller. It requires the driver to be built in the kernel.
-config GPIO_MC9S08DZ60
- bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
- depends on I2C=y && MACH_MX35_3DS
- help
- Select this to enable the MC9S08DZ60 GPIO driver
-
config GPIO_PCA953X
tristate "PCA95[357]x, PCA9698, TCA64xx, and MAX7310 I/O ports"
select REGMAP_I2C
@@ -1677,6 +1671,15 @@ config GPIO_MOCKUP
tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
it.
+config GPIO_VIRTIO
+ tristate "VirtIO GPIO support"
+ depends on VIRTIO
+ help
+ Say Y here to enable guest support for virtio-based GPIO controllers.
+
+ These virtual GPIOs can be routed to real GPIOs or attached to
+ simulators on the host (like QEMU).
+
endmenu
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 5243e2d1c207..fbcda637d5e1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -92,7 +92,6 @@ obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
-obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
@@ -166,6 +165,7 @@ obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
+obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o
obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index a99ece15db95..10f303d15225 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -17,37 +17,30 @@
#include <linux/spinlock.h>
#include <linux/string.h>
-/*
- * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie,
- * slots within the clocked serial GPIO data). Since each HW GPIO is both an
- * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip
- * device.
- *
- * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and
- * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET.
- */
-#define MAX_NR_HW_SGPIO 80
-#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO
-
#define ASPEED_SGPIO_CTRL 0x54
-#define ASPEED_SGPIO_PINS_MASK GENMASK(9, 6)
#define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16)
#define ASPEED_SGPIO_ENABLE BIT(0)
+#define ASPEED_SGPIO_PINS_SHIFT 6
+
+struct aspeed_sgpio_pdata {
+ const u32 pin_mask;
+};
struct aspeed_sgpio {
struct gpio_chip chip;
+ struct irq_chip intc;
struct clk *pclk;
spinlock_t lock;
void __iomem *base;
int irq;
- int n_sgpio;
};
struct aspeed_sgpio_bank {
- uint16_t val_regs;
- uint16_t rdata_reg;
- uint16_t irq_regs;
+ u16 val_regs;
+ u16 rdata_reg;
+ u16 irq_regs;
+ u16 tolerance_regs;
const char names[4][3];
};
@@ -63,19 +56,29 @@ static const struct aspeed_sgpio_bank aspeed_sgpio_banks[] = {
.val_regs = 0x0000,
.rdata_reg = 0x0070,
.irq_regs = 0x0004,
+ .tolerance_regs = 0x0018,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x001C,
.rdata_reg = 0x0074,
.irq_regs = 0x0020,
+ .tolerance_regs = 0x0034,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0038,
.rdata_reg = 0x0078,
.irq_regs = 0x003C,
- .names = { "I", "J" },
+ .tolerance_regs = 0x0050,
+ .names = { "I", "J", "K", "L" },
+ },
+ {
+ .val_regs = 0x0090,
+ .rdata_reg = 0x007C,
+ .irq_regs = 0x0094,
+ .tolerance_regs = 0x00A8,
+ .names = { "M", "N", "O", "P" },
},
};
@@ -87,6 +90,7 @@ enum aspeed_sgpio_reg {
reg_irq_type1,
reg_irq_type2,
reg_irq_status,
+ reg_tolerance,
};
#define GPIO_VAL_VALUE 0x00
@@ -115,15 +119,17 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
case reg_irq_status:
return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+ case reg_tolerance:
+ return gpio->base + bank->tolerance_regs;
default:
		/* actually, if code runs to here, it's an error case */
BUG();
}
}
-#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5)
-#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f)
-#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+#define GPIO_BANK(x) ((x) >> 6)
+#define GPIO_OFFSET(x) ((x) & GENMASK(5, 0))
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1)
static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
{
@@ -138,39 +144,25 @@ static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask, unsigned int ngpios)
{
- struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
- int n = sgpio->n_sgpio;
- int c = SGPIO_OUTPUT_OFFSET - n;
-
- WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
-
- /* input GPIOs in the lower range */
- bitmap_set(valid_mask, 0, n);
- bitmap_clear(valid_mask, n, c);
-
- /* output GPIOS above SGPIO_OUTPUT_OFFSET */
- bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n);
- bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c);
-
+ bitmap_set(valid_mask, 0, ngpios);
return 0;
}
static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask, unsigned int ngpios)
{
- struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
- int n = sgpio->n_sgpio;
+ unsigned int i;
- WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
-
- /* input GPIOs in the lower range */
- bitmap_set(valid_mask, 0, n);
- bitmap_clear(valid_mask, n, ngpios - n);
+ /* input GPIOs are even bits */
+ for (i = 0; i < ngpios; i++) {
+ if (i % 2)
+ clear_bit(i, valid_mask);
+ }
}
static bool aspeed_sgpio_is_input(unsigned int offset)
{
- return offset < SGPIO_OUTPUT_OFFSET;
+ return !(offset % 2);
}
static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -409,14 +401,6 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
-static struct irq_chip aspeed_sgpio_irqchip = {
- .name = "aspeed-sgpio",
- .irq_ack = aspeed_sgpio_irq_ack,
- .irq_mask = aspeed_sgpio_irq_mask,
- .irq_unmask = aspeed_sgpio_irq_unmask,
- .irq_set_type = aspeed_sgpio_set_type,
-};
-
static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
struct platform_device *pdev)
{
@@ -439,8 +423,14 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
}
+ gpio->intc.name = dev_name(&pdev->dev);
+ gpio->intc.irq_ack = aspeed_sgpio_irq_ack;
+ gpio->intc.irq_mask = aspeed_sgpio_irq_mask;
+ gpio->intc.irq_unmask = aspeed_sgpio_irq_unmask;
+ gpio->intc.irq_set_type = aspeed_sgpio_set_type;
+
irq = &gpio->chip.irq;
- irq->chip = &aspeed_sgpio_irqchip;
+ irq->chip = &gpio->intc;
irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
@@ -463,9 +453,56 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
return 0;
}
+static const struct aspeed_sgpio_pdata ast2400_sgpio_pdata = {
+ .pin_mask = GENMASK(9, 6),
+};
+
+static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
+ unsigned int offset, bool enable)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ reg = bank_reg(gpio, to_bank(offset), reg_tolerance);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ val = readl(reg);
+
+ if (enable)
+ val |= GPIO_BIT(offset);
+ else
+ val &= ~GPIO_BIT(offset);
+
+ writel(val, reg);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int aspeed_sgpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ unsigned long param = pinconf_to_config_param(config);
+ u32 arg = pinconf_to_config_argument(config);
+
+ if (param == PIN_CONFIG_PERSIST_STATE)
+ return aspeed_sgpio_reset_tolerance(chip, offset, arg);
+
+ return -ENOTSUPP;
+}
+
+static const struct aspeed_sgpio_pdata ast2600_sgpiom_pdata = {
+ .pin_mask = GENMASK(10, 6),
+};
+
static const struct of_device_id aspeed_sgpio_of_table[] = {
- { .compatible = "aspeed,ast2400-sgpio" },
- { .compatible = "aspeed,ast2500-sgpio" },
+ { .compatible = "aspeed,ast2400-sgpio", .data = &ast2400_sgpio_pdata, },
+ { .compatible = "aspeed,ast2500-sgpio", .data = &ast2400_sgpio_pdata, },
+ { .compatible = "aspeed,ast2600-sgpiom", .data = &ast2600_sgpiom_pdata, },
{}
};
@@ -473,10 +510,11 @@ MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table);
static int __init aspeed_sgpio_probe(struct platform_device *pdev)
{
+ u32 nr_gpios, sgpio_freq, sgpio_clk_div, gpio_cnt_regval, pin_mask;
+ const struct aspeed_sgpio_pdata *pdata;
struct aspeed_sgpio *gpio;
- u32 nr_gpios, sgpio_freq, sgpio_clk_div;
- int rc;
unsigned long apb_freq;
+ int rc;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -486,18 +524,23 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
- rc = of_property_read_u32(pdev->dev.of_node, "ngpios", &nr_gpios);
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
+
+ pin_mask = pdata->pin_mask;
+
+ rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios);
if (rc < 0) {
dev_err(&pdev->dev, "Could not read ngpios property\n");
return -EINVAL;
- } else if (nr_gpios > MAX_NR_HW_SGPIO) {
- dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
- MAX_NR_HW_SGPIO, nr_gpios);
+ } else if (nr_gpios % 8) {
+		dev_err(&pdev->dev, "Number of GPIOs not a multiple of 8: %d\n",
+ nr_gpios);
return -EINVAL;
}
- gpio->n_sgpio = nr_gpios;
- rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
+ rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq);
if (rc < 0) {
dev_err(&pdev->dev, "Could not read bus-frequency property\n");
return -EINVAL;
@@ -528,15 +571,14 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
if (sgpio_clk_div > (1 << 16) - 1)
return -EINVAL;
- iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) |
- FIELD_PREP(ASPEED_SGPIO_PINS_MASK, (nr_gpios / 8)) |
- ASPEED_SGPIO_ENABLE,
- gpio->base + ASPEED_SGPIO_CTRL);
+ gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask;
+ iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval |
+ ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL);
spin_lock_init(&gpio->lock);
gpio->chip.parent = &pdev->dev;
- gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2;
+ gpio->chip.ngpio = nr_gpios * 2;
gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask;
gpio->chip.direction_input = aspeed_sgpio_dir_in;
gpio->chip.direction_output = aspeed_sgpio_dir_out;
@@ -545,7 +587,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
gpio->chip.set = aspeed_sgpio_set;
- gpio->chip.set_config = NULL;
+ gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
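With the new layout every hardware serial GPIO occupies a pair of lines: even line numbers are the input side, odd ones the output side. A standalone arithmetic check of the macros above (line 130 is an arbitrary example):

#include <stdio.h>

#define GPIO_BANK(x)	((x) >> 6)
#define GPIO_OFFSET(x)	((x) & 0x3f)
#define GPIO_BIT(x)	(1U << (GPIO_OFFSET(x) >> 1))

int main(void)
{
	unsigned int line = 130;	/* even, so the input side of HW GPIO 65 */

	printf("bank=%u offset=%u bit=0x%x input=%d\n",
	       GPIO_BANK(line), GPIO_OFFSET(line), GPIO_BIT(line),
	       !(line % 2));	/* prints: bank=2 offset=2 bit=0x2 input=1 */
	return 0;
}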
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 74b7c91c3d1a..895a79936248 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -602,49 +602,6 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
.resume_noirq = brcmstb_gpio_resume,
};
-static void brcmstb_gpio_set_names(struct device *dev,
- struct brcmstb_gpio_bank *bank)
-{
- struct device_node *np = dev->of_node;
- const char **names;
- int nstrings, base;
- unsigned int i;
-
- base = bank->id * MAX_GPIO_PER_BANK;
-
- nstrings = of_property_count_strings(np, "gpio-line-names");
- if (nstrings <= base)
- /* Line names not present */
- return;
-
- names = devm_kcalloc(dev, MAX_GPIO_PER_BANK, sizeof(*names),
- GFP_KERNEL);
- if (!names)
- return;
-
- /*
- * Make sure to not index beyond the end of the number of descriptors
- * of the GPIO device.
- */
- for (i = 0; i < bank->width; i++) {
- const char *name;
- int ret;
-
- ret = of_property_read_string_index(np, "gpio-line-names",
- base + i, &name);
- if (ret) {
- if (ret != -ENODATA)
- dev_err(dev, "unable to name line %d: %d\n",
- base + i, ret);
- break;
- }
- if (*name)
- names[i] = name;
- }
-
- bank->gc.names = names;
-}
-
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -758,6 +715,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
gc->of_xlate = brcmstb_gpio_of_xlate;
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
+ gc->offset = bank->id * MAX_GPIO_PER_BANK;
if (priv->parent_irq > 0)
gc->to_irq = brcmstb_gpio_to_irq;
@@ -768,7 +726,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
- brcmstb_gpio_set_names(dev, bank);
err = gpiochip_add_data(gc, bank);
if (err) {
dev_err(dev, "Could not add gpiochip for bank %d\n",
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 3eb13d6d31ef..f98fa33e1679 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -16,7 +16,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_data/gpio-dwapb.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
@@ -48,6 +47,7 @@
#define DWAPB_DRIVER_NAME "gpio-dwapb"
#define DWAPB_MAX_PORTS 4
+#define DWAPB_MAX_GPIOS 32
#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
@@ -65,6 +65,19 @@
struct dwapb_gpio;
+struct dwapb_port_property {
+ struct fwnode_handle *fwnode;
+ unsigned int idx;
+ unsigned int ngpio;
+ unsigned int gpio_base;
+ int irq[DWAPB_MAX_GPIOS];
+};
+
+struct dwapb_platform_data {
+ struct dwapb_port_property *properties;
+ unsigned int nports;
+};
+
#ifdef CONFIG_PM_SLEEP
/* Store GPIO context across system-wide suspend/resume transitions */
struct dwapb_context {
@@ -436,21 +449,17 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
pirq->irqchip.irq_set_wake = dwapb_irq_set_wake;
#endif
- if (!pp->irq_shared) {
- girq->num_parents = pirq->nr_irqs;
- girq->parents = pirq->irq;
- girq->parent_handler_data = gpio;
- girq->parent_handler = dwapb_irq_handler;
- } else {
- /* This will let us handle the parent IRQ in the driver */
+ /*
+ * Intel ACPI-based platforms mostly have the DesignWare APB GPIO
+	 * IRQ line shared between several devices. In that case the parent
+	 * IRQ has to be requested as shared so it gets properly delivered
+	 * to all the connected devices.
+	 */
+ */
+ if (has_acpi_companion(gpio->dev)) {
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
- /*
- * Request a shared IRQ since where MFD would have devices
- * using the same irq pin
- */
err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
@@ -458,6 +467,11 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
dev_err(gpio->dev, "error requesting IRQ\n");
goto err_kfree_pirq;
}
+ } else {
+ girq->num_parents = pirq->nr_irqs;
+ girq->parents = pirq->irq;
+ girq->parent_handler_data = gpio;
+ girq->parent_handler = dwapb_irq_handler;
}
girq->chip = &pirq->irqchip;
@@ -581,9 +595,12 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
pp->ngpio = DWAPB_MAX_GPIOS;
}
- pp->irq_shared = false;
pp->gpio_base = -1;
+ /* For internal use only, new platforms mustn't exercise this */
+ if (is_software_node(fwnode))
+ fwnode_property_read_u32(fwnode, "gpio-base", &pp->gpio_base);
+
/*
* Only port A can provide interrupts in all configurations of
* the IP.
@@ -670,17 +687,12 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
unsigned int i;
struct dwapb_gpio *gpio;
int err;
+ struct dwapb_platform_data *pdata;
struct device *dev = &pdev->dev;
- struct dwapb_platform_data *pdata = dev_get_platdata(dev);
-
- if (!pdata) {
- pdata = dwapb_gpio_get_pdata(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
- if (!pdata->nports)
- return -ENODEV;
+ pdata = dwapb_gpio_get_pdata(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
deleted file mode 100644
index a9f17cebd5ed..000000000000
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2009-2012 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * Author: Wu Guoxing <b39297@freescale.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
-
-#define GPIO_GROUP_NUM 2
-#define GPIO_NUM_PER_GROUP 8
-#define GPIO_NUM (GPIO_GROUP_NUM*GPIO_NUM_PER_GROUP)
-
-struct mc9s08dz60 {
- struct i2c_client *client;
- struct gpio_chip chip;
-};
-
-static void mc9s_gpio_to_reg_and_bit(int offset, u8 *reg, u8 *bit)
-{
- *reg = 0x20 + offset / GPIO_NUM_PER_GROUP;
- *bit = offset % GPIO_NUM_PER_GROUP;
-}
-
-static int mc9s08dz60_get_value(struct gpio_chip *gc, unsigned offset)
-{
- u8 reg, bit;
- s32 value;
- struct mc9s08dz60 *mc9s = gpiochip_get_data(gc);
-
- mc9s_gpio_to_reg_and_bit(offset, &reg, &bit);
- value = i2c_smbus_read_byte_data(mc9s->client, reg);
-
- return (value >= 0) ? (value >> bit) & 0x1 : 0;
-}
-
-static int mc9s08dz60_set(struct mc9s08dz60 *mc9s, unsigned offset, int val)
-{
- u8 reg, bit;
- s32 value;
-
- mc9s_gpio_to_reg_and_bit(offset, &reg, &bit);
- value = i2c_smbus_read_byte_data(mc9s->client, reg);
- if (value >= 0) {
- if (val)
- value |= 1 << bit;
- else
- value &= ~(1 << bit);
-
- return i2c_smbus_write_byte_data(mc9s->client, reg, value);
- } else
- return value;
-
-}
-
-
-static void mc9s08dz60_set_value(struct gpio_chip *gc, unsigned offset, int val)
-{
- struct mc9s08dz60 *mc9s = gpiochip_get_data(gc);
-
- mc9s08dz60_set(mc9s, offset, val);
-}
-
-static int mc9s08dz60_direction_output(struct gpio_chip *gc,
- unsigned offset, int val)
-{
- struct mc9s08dz60 *mc9s = gpiochip_get_data(gc);
-
- return mc9s08dz60_set(mc9s, offset, val);
-}
-
-static int mc9s08dz60_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct mc9s08dz60 *mc9s;
-
- mc9s = devm_kzalloc(&client->dev, sizeof(*mc9s), GFP_KERNEL);
- if (!mc9s)
- return -ENOMEM;
-
- mc9s->chip.label = client->name;
- mc9s->chip.base = -1;
- mc9s->chip.parent = &client->dev;
- mc9s->chip.owner = THIS_MODULE;
- mc9s->chip.ngpio = GPIO_NUM;
- mc9s->chip.can_sleep = true;
- mc9s->chip.get = mc9s08dz60_get_value;
- mc9s->chip.set = mc9s08dz60_set_value;
- mc9s->chip.direction_output = mc9s08dz60_direction_output;
- mc9s->client = client;
- i2c_set_clientdata(client, mc9s);
-
- return devm_gpiochip_add_data(&client->dev, &mc9s->chip, mc9s);
-}
-
-static const struct i2c_device_id mc9s08dz60_id[] = {
- {"mc9s08dz60", 0},
- {},
-};
-
-static struct i2c_driver mc9s08dz60_i2c_driver = {
- .driver = {
- .name = "mc9s08dz60",
- },
- .probe = mc9s08dz60_probe,
- .id_table = mc9s08dz60_id,
-};
-builtin_i2c_driver(mc9s08dz60_i2c_driver);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 53d4abefa6ff..efa9acdc320a 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -155,11 +155,10 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
return 0;
}
-#ifdef CONFIG_PM
/*
* Save register configuration and disable interrupts.
*/
-static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+static void __maybe_unused ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -185,7 +184,7 @@ static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
/*
* This function restores the register configuration of the GPIO device.
*/
-static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+static void __maybe_unused ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -207,7 +206,6 @@ static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
&chip->reg->ioh_sel_reg[i]);
}
}
-#endif
static int ioh_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
{
@@ -522,47 +520,23 @@ static void ioh_gpio_remove(struct pci_dev *pdev)
kfree(chip);
}
-#ifdef CONFIG_PM
-static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ioh_gpio_suspend(struct device *dev)
{
- s32 ret;
- struct ioh_gpio *chip = pci_get_drvdata(pdev);
+ struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
ioh_gpio_save_reg_conf(chip);
spin_unlock_irqrestore(&chip->spinlock, flags);
- ret = pci_save_state(pdev);
- if (ret) {
- dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
- return ret;
- }
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_wake(pdev, PCI_D0, 1);
- if (ret)
- dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
-
return 0;
}
-static int ioh_gpio_resume(struct pci_dev *pdev)
+static int __maybe_unused ioh_gpio_resume(struct device *dev)
{
- s32 ret;
- struct ioh_gpio *chip = pci_get_drvdata(pdev);
+ struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
- ret = pci_enable_wake(pdev, PCI_D0, 0);
-
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
- return ret;
- }
- pci_restore_state(pdev);
-
spin_lock_irqsave(&chip->spinlock, flags);
iowrite32(0x01, &chip->reg->srst);
iowrite32(0x00, &chip->reg->srst);
@@ -571,10 +545,8 @@ static int ioh_gpio_resume(struct pci_dev *pdev)
return 0;
}
-#else
-#define ioh_gpio_suspend NULL
-#define ioh_gpio_resume NULL
-#endif
+
+static SIMPLE_DEV_PM_OPS(ioh_gpio_pm_ops, ioh_gpio_suspend, ioh_gpio_resume);
static const struct pci_device_id ioh_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
@@ -587,8 +559,9 @@ static struct pci_driver ioh_gpio_driver = {
.id_table = ioh_gpio_pcidev_id,
.probe = ioh_gpio_probe,
.remove = ioh_gpio_remove,
- .suspend = ioh_gpio_suspend,
- .resume = ioh_gpio_resume
+ .driver = {
+ .pm = &ioh_gpio_pm_ops,
+ },
};
module_pci_driver(ioh_gpio_driver);
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index befa5e109943..177d03ef4529 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
@@ -8,6 +7,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -47,12 +47,10 @@
#define YU_GPIO_MODE0_SET 0x54
#define YU_GPIO_MODE0_CLEAR 0x58
-#ifdef CONFIG_PM
struct mlxbf2_gpio_context_save_regs {
u32 gpio_mode0;
u32 gpio_mode1;
};
-#endif
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
@@ -61,9 +59,7 @@ struct mlxbf2_gpio_context {
/* YU GPIO blocks address */
void __iomem *gpio_io;
-#ifdef CONFIG_PM
struct mlxbf2_gpio_context_save_regs *csave_regs;
-#endif
};
/* BlueField-2 gpio shared structure. */
@@ -73,11 +69,8 @@ struct mlxbf2_gpio_param {
struct mutex *lock;
};
-static struct resource yu_arm_gpio_lock_res = {
- .start = YU_ARM_GPIO_LOCK_ADDR,
- .end = YU_ARM_GPIO_LOCK_ADDR + YU_ARM_GPIO_LOCK_SIZE - 1,
- .name = "YU_ARM_GPIO_LOCK",
-};
+static struct resource yu_arm_gpio_lock_res =
+ DEFINE_RES_MEM_NAMED(YU_ARM_GPIO_LOCK_ADDR, YU_ARM_GPIO_LOCK_SIZE, "YU_ARM_GPIO_LOCK");
static DEFINE_MUTEX(yu_arm_gpio_lock_mutex);
@@ -232,7 +225,6 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
struct mlxbf2_gpio_context *gs;
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
- struct resource *res;
unsigned int npins;
int ret;
@@ -241,13 +233,9 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
/* YU GPIO block address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- gs->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
- if (!gs->gpio_io)
- return -ENOMEM;
+ gs->gpio_io = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gs->gpio_io))
+ return PTR_ERR(gs->gpio_io);
ret = mlxbf2_gpio_get_lock_res(pdev);
if (ret) {
@@ -284,11 +272,9 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int mlxbf2_gpio_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev)
{
- struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+ struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
gs->csave_regs->gpio_mode0 = readl(gs->gpio_io +
YU_GPIO_MODE0);
@@ -298,9 +284,9 @@ static int mlxbf2_gpio_suspend(struct platform_device *pdev,
return 0;
}
-static int mlxbf2_gpio_resume(struct platform_device *pdev)
+static int __maybe_unused mlxbf2_gpio_resume(struct device *dev)
{
- struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+ struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
writel(gs->csave_regs->gpio_mode0, gs->gpio_io +
YU_GPIO_MODE0);
@@ -309,7 +295,7 @@ static int mlxbf2_gpio_resume(struct platform_device *pdev)
return 0;
}
-#endif
+static SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume);
static const struct acpi_device_id __maybe_unused mlxbf2_gpio_acpi_match[] = {
{ "MLNXBF22", 0 },
@@ -320,13 +306,10 @@ MODULE_DEVICE_TABLE(acpi, mlxbf2_gpio_acpi_match);
static struct platform_driver mlxbf2_gpio_driver = {
.driver = {
.name = "mlxbf2_gpio",
- .acpi_match_table = ACPI_PTR(mlxbf2_gpio_acpi_match),
+ .acpi_match_table = mlxbf2_gpio_acpi_match,
+ .pm = &mlxbf2_pm_ops,
},
.probe = mlxbf2_gpio_probe,
-#ifdef CONFIG_PM
- .suspend = mlxbf2_gpio_suspend,
- .resume = mlxbf2_gpio_resume,
-#endif
};
module_platform_driver(mlxbf2_gpio_driver);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 67dc38976ab6..70d6ae20b1da 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -332,7 +332,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
mpc8xxx_gc->regs + GPIO_DIR, NULL,
BGPIOF_BIG_ENDIAN);
if (ret)
- goto err;
+ return ret;
dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
} else {
ret = bgpio_init(gc, &pdev->dev, 4,
@@ -342,7 +342,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
BGPIOF_BIG_ENDIAN
| BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (ret)
- goto err;
+ return ret;
dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
}
@@ -380,11 +380,11 @@ static int mpc8xxx_probe(struct platform_device *pdev)
is_acpi_node(fwnode))
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
- ret = gpiochip_add_data(gc, mpc8xxx_gc);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
if (ret) {
dev_err(&pdev->dev,
"GPIO chip registration failed with status %d\n", ret);
- goto err;
+ return ret;
}
mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
@@ -416,7 +416,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return 0;
err:
- iounmap(mpc8xxx_gc->regs);
+ irq_domain_remove(mpc8xxx_gc->irq);
return ret;
}
@@ -429,9 +429,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
irq_domain_remove(mpc8xxx_gc->irq);
}
- gpiochip_remove(&mpc8xxx_gc->gc);
- iounmap(mpc8xxx_gc->regs);
-
return 0;
}
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 10c0a9bc5ea1..c3658a597a80 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -239,6 +239,7 @@ mediatek_gpio_bank_probe(struct device *dev,
if (!rg->chip.label)
return -ENOMEM;
+ rg->chip.offset = bank * MTK_BANK_WIDTH;
rg->irq_chip.name = dev_name(dev);
rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index b378aba32602..f7b653314e7e 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -564,9 +564,9 @@ static int gpio_rcar_probe(struct platform_device *pdev)
}
if (p->info.has_inen) {
- pm_runtime_get_sync(p->dev);
+ pm_runtime_get_sync(dev);
gpio_rcar_enable_inputs(p);
- pm_runtime_put(p->dev);
+ pm_runtime_put(dev);
}
dev_info(dev, "driving %d GPIOs\n", npins);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 05c90d76cb22..c99858f40a27 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -607,15 +607,21 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
if (!gpio)
return -ENOMEM;
- gpio->soc = of_device_get_match_data(&pdev->dev);
+ gpio->soc = device_get_match_data(&pdev->dev);
gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
- if (IS_ERR(gpio->secure))
- return PTR_ERR(gpio->secure);
+ if (IS_ERR(gpio->secure)) {
+ gpio->secure = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+ }
gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
- if (IS_ERR(gpio->base))
- return PTR_ERR(gpio->base);
+ if (IS_ERR(gpio->base)) {
+ gpio->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+ }
err = platform_irq_count(pdev);
if (err < 0)
@@ -677,11 +683,13 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.names = (const char * const *)names;
+#if defined(CONFIG_OF_GPIO)
gpio->gpio.of_node = pdev->dev.of_node;
gpio->gpio.of_gpio_n_cells = 2;
gpio->gpio.of_xlate = tegra186_gpio_of_xlate;
+#endif /* CONFIG_OF_GPIO */
- gpio->intc.name = pdev->dev.of_node->name;
+ gpio->intc.name = dev_name(&pdev->dev);
gpio->intc.irq_ack = tegra186_irq_ack;
gpio->intc.irq_mask = tegra186_irq_mask;
gpio->intc.irq_unmask = tegra186_irq_unmask;
@@ -893,10 +901,20 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra186_gpio_of_match);
+static const struct acpi_device_id tegra186_gpio_acpi_match[] = {
+ { .id = "NVDA0108", .driver_data = (kernel_ulong_t)&tegra186_main_soc },
+ { .id = "NVDA0208", .driver_data = (kernel_ulong_t)&tegra186_aon_soc },
+ { .id = "NVDA0308", .driver_data = (kernel_ulong_t)&tegra194_main_soc },
+ { .id = "NVDA0408", .driver_data = (kernel_ulong_t)&tegra194_aon_soc },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, tegra186_gpio_acpi_match);
+
static struct platform_driver tegra186_gpio_driver = {
.driver = {
.name = "tegra186-gpio",
.of_match_table = tegra186_gpio_of_match,
+ .acpi_match_table = tegra186_gpio_acpi_match,
},
.probe = tegra186_gpio_probe,
};
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index c301c1d56dd2..e55d28a8a66f 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -404,11 +404,10 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+
ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpioa, vb_gpio);
- if (ret < 0) {
- dev_err(vb_gpio->gpioa.parent, "could not add gpio a");
+ if (ret < 0)
return ret;
- }
/* registering gpio b */
vb_gpio->gpiob.label = "viperboard gpio b";
@@ -421,15 +420,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
- ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpiob, vb_gpio);
- if (ret < 0) {
- dev_err(vb_gpio->gpiob.parent, "could not add gpio b");
- return ret;
- }
-
- platform_set_drvdata(pdev, vb_gpio);
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpiob, vb_gpio);
}
static struct platform_driver vprbrd_gpio_driver = {
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
new file mode 100644
index 000000000000..d24f1c9264bc
--- /dev/null
+++ b/drivers/gpio/gpio-virtio.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO driver for virtio-based virtual GPIO controllers
+ *
+ * Copyright (C) 2021 metux IT consult
+ * Enrico Weigelt, metux IT consult <info@metux.net>
+ *
+ * Copyright (C) 2021 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/virtio_config.h>
+#include <uapi/linux/virtio_gpio.h>
+#include <uapi/linux/virtio_ids.h>
+
+struct virtio_gpio_line {
+ struct mutex lock; /* Protects line operation */
+ struct completion completion;
+ struct virtio_gpio_request req ____cacheline_aligned;
+ struct virtio_gpio_response res ____cacheline_aligned;
+ unsigned int rxlen;
+};
+
+struct virtio_gpio {
+ struct virtio_device *vdev;
+ struct mutex lock; /* Protects virtqueue operation */
+ struct gpio_chip gc;
+ struct virtio_gpio_line *lines;
+ struct virtqueue *request_vq;
+};
+
+static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
+ u8 txvalue, u8 *rxvalue, void *response, u32 rxlen)
+{
+ struct virtio_gpio_line *line = &vgpio->lines[gpio];
+ struct virtio_gpio_request *req = &line->req;
+ struct virtio_gpio_response *res = response;
+ struct scatterlist *sgs[2], req_sg, res_sg;
+ struct device *dev = &vgpio->vdev->dev;
+ int ret;
+
+ /*
+ * Prevent concurrent requests for the same line since we have
+	 * pre-allocated request/response buffers for each GPIO line. Moreover,
+	 * Linux normally accesses a GPIO line sequentially, so this lock
+	 * should almost always be taken without contention.
+ */
+ mutex_lock(&line->lock);
+
+ req->type = cpu_to_le16(type);
+ req->gpio = cpu_to_le16(gpio);
+ req->value = cpu_to_le32(txvalue);
+
+ sg_init_one(&req_sg, req, sizeof(*req));
+ sg_init_one(&res_sg, res, rxlen);
+ sgs[0] = &req_sg;
+ sgs[1] = &res_sg;
+
+ line->rxlen = 0;
+ reinit_completion(&line->completion);
+
+ /*
+ * Virtqueue callers need to ensure they don't call its APIs with other
+ * virtqueue operations at the same time.
+ */
+ mutex_lock(&vgpio->lock);
+ ret = virtqueue_add_sgs(vgpio->request_vq, sgs, 1, 1, line, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "failed to add request to vq\n");
+ mutex_unlock(&vgpio->lock);
+ goto out;
+ }
+
+ virtqueue_kick(vgpio->request_vq);
+ mutex_unlock(&vgpio->lock);
+
+ if (!wait_for_completion_timeout(&line->completion, HZ)) {
+ dev_err(dev, "GPIO operation timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
+ dev_err(dev, "GPIO request failed: %d\n", gpio);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(line->rxlen != rxlen)) {
+ dev_err(dev, "GPIO operation returned incorrect len (%u : %u)\n",
+ rxlen, line->rxlen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (rxvalue)
+ *rxvalue = res->value;
+
+out:
+ mutex_unlock(&line->lock);
+ return ret;
+}
+
+static int virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
+ u8 txvalue, u8 *rxvalue)
+{
+ struct virtio_gpio_line *line = &vgpio->lines[gpio];
+ struct virtio_gpio_response *res = &line->res;
+
+ return _virtio_gpio_req(vgpio, type, gpio, txvalue, rxvalue, res,
+ sizeof(*res));
+}
+
+static void virtio_gpio_free(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+
+ virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
+ VIRTIO_GPIO_DIRECTION_NONE, NULL);
+}
+
+static int virtio_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+ u8 direction;
+ int ret;
+
+ ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_DIRECTION, gpio, 0,
+ &direction);
+ if (ret)
+ return ret;
+
+ switch (direction) {
+ case VIRTIO_GPIO_DIRECTION_IN:
+ return GPIO_LINE_DIRECTION_IN;
+ case VIRTIO_GPIO_DIRECTION_OUT:
+ return GPIO_LINE_DIRECTION_OUT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int virtio_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+
+ return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
+ VIRTIO_GPIO_DIRECTION_IN, NULL);
+}
+
+static int virtio_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+ int ret;
+
+ ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
+ if (ret)
+ return ret;
+
+ return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
+ VIRTIO_GPIO_DIRECTION_OUT, NULL);
+}
+
+static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+ u8 value;
+ int ret;
+
+ ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_VALUE, gpio, 0, &value);
+ return ret ? ret : value;
+}
+
+static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+
+ virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
+}
+
+static void virtio_gpio_request_vq(struct virtqueue *vq)
+{
+ struct virtio_gpio_line *line;
+ unsigned int len;
+
+ do {
+ line = virtqueue_get_buf(vq, &len);
+ if (!line)
+ return;
+
+ line->rxlen = len;
+ complete(&line->completion);
+ } while (1);
+}
+
+static void virtio_gpio_free_vqs(struct virtio_device *vdev)
+{
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+}
+
+static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
+ struct virtio_device *vdev)
+{
+ const char * const names[] = { "requestq" };
+ vq_callback_t *cbs[] = {
+ virtio_gpio_request_vq,
+ };
+ struct virtqueue *vqs[1] = { NULL };
+ int ret;
+
+ ret = virtio_find_vqs(vdev, 1, vqs, cbs, names, NULL);
+ if (ret) {
+ dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
+ return ret;
+ }
+
+ if (!vqs[0]) {
+ dev_err(&vdev->dev, "failed to find requestq vq\n");
+ return -ENODEV;
+ }
+ vgpio->request_vq = vqs[0];
+
+ return 0;
+}
+
+static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
+ u32 gpio_names_size, u16 ngpio)
+{
+ struct virtio_gpio_response_get_names *res;
+ struct device *dev = &vgpio->vdev->dev;
+ u8 *gpio_names, *str;
+ const char **names;
+ int i, ret, len;
+
+ if (!gpio_names_size)
+ return NULL;
+
+ len = sizeof(*res) + gpio_names_size;
+ res = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!res)
+ return NULL;
+ gpio_names = res->value;
+
+ ret = _virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_NAMES, 0, 0, NULL,
+ res, len);
+ if (ret) {
+ dev_err(dev, "Failed to get GPIO names: %d\n", ret);
+ return NULL;
+ }
+
+ names = devm_kcalloc(dev, ngpio, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return NULL;
+
+ /* NULL terminate the string instead of checking it */
+ gpio_names[gpio_names_size - 1] = '\0';
+
+ for (i = 0, str = gpio_names; i < ngpio; i++) {
+ names[i] = str;
+ str += strlen(str) + 1; /* zero-length strings are allowed */
+
+ if (str > gpio_names + gpio_names_size) {
+ dev_err(dev, "gpio_names block is too short (%d)\n", i);
+ return NULL;
+ }
+ }
+
+ return names;
+}
+
+static int virtio_gpio_probe(struct virtio_device *vdev)
+{
+ struct virtio_gpio_config config;
+ struct device *dev = &vdev->dev;
+ struct virtio_gpio *vgpio;
+ u32 gpio_names_size;
+ u16 ngpio;
+ int ret, i;
+
+ vgpio = devm_kzalloc(dev, sizeof(*vgpio), GFP_KERNEL);
+ if (!vgpio)
+ return -ENOMEM;
+
+ /* Read configuration */
+ virtio_cread_bytes(vdev, 0, &config, sizeof(config));
+ gpio_names_size = le32_to_cpu(config.gpio_names_size);
+ ngpio = le16_to_cpu(config.ngpio);
+ if (!ngpio) {
+ dev_err(dev, "Number of GPIOs can't be zero\n");
+ return -EINVAL;
+ }
+
+ vgpio->lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->lines), GFP_KERNEL);
+ if (!vgpio->lines)
+ return -ENOMEM;
+
+ for (i = 0; i < ngpio; i++) {
+ mutex_init(&vgpio->lines[i].lock);
+ init_completion(&vgpio->lines[i].completion);
+ }
+
+ mutex_init(&vgpio->lock);
+ vdev->priv = vgpio;
+
+ vgpio->vdev = vdev;
+ vgpio->gc.free = virtio_gpio_free;
+ vgpio->gc.get_direction = virtio_gpio_get_direction;
+ vgpio->gc.direction_input = virtio_gpio_direction_input;
+ vgpio->gc.direction_output = virtio_gpio_direction_output;
+ vgpio->gc.get = virtio_gpio_get;
+ vgpio->gc.set = virtio_gpio_set;
+ vgpio->gc.ngpio = ngpio;
+ vgpio->gc.base = -1; /* Allocate base dynamically */
+ vgpio->gc.label = dev_name(dev);
+ vgpio->gc.parent = dev;
+ vgpio->gc.owner = THIS_MODULE;
+ vgpio->gc.can_sleep = true;
+
+ ret = virtio_gpio_alloc_vqs(vgpio, vdev);
+ if (ret)
+ return ret;
+
+ /* Mark the device ready to perform operations from within probe() */
+ virtio_device_ready(vdev);
+
+ vgpio->gc.names = virtio_gpio_get_names(vgpio, gpio_names_size, ngpio);
+
+ ret = gpiochip_add_data(&vgpio->gc, vgpio);
+ if (ret) {
+ virtio_gpio_free_vqs(vdev);
+ dev_err(dev, "Failed to add virtio-gpio controller\n");
+ }
+
+ return ret;
+}
+
+static void virtio_gpio_remove(struct virtio_device *vdev)
+{
+ struct virtio_gpio *vgpio = vdev->priv;
+
+ gpiochip_remove(&vgpio->gc);
+ virtio_gpio_free_vqs(vdev);
+}
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_GPIO, VIRTIO_DEV_ANY_ID },
+ {},
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_gpio_driver = {
+ .id_table = id_table,
+ .probe = virtio_gpio_probe,
+ .remove = virtio_gpio_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_virtio_driver(virtio_gpio_driver);
+
+MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("VirtIO GPIO driver");
+MODULE_LICENSE("GPL");
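For reference, the request the driver puts on the virtqueue is the fixed-size message from <uapi/linux/virtio_gpio.h>; a sketch of filling one SET_VALUE request (the line number is hypothetical):

#include <linux/virtio_config.h>
#include <uapi/linux/virtio_gpio.h>

static void example_fill_set_value(struct virtio_gpio_request *req)
{
	req->type  = cpu_to_le16(VIRTIO_GPIO_MSG_SET_VALUE);
	req->gpio  = cpu_to_le16(3);	/* hypothetical line number */
	req->value = cpu_to_le32(1);	/* drive the line high */
}
/*
 * The device answers through a second descriptor holding a
 * struct virtio_gpio_response; its status must be VIRTIO_GPIO_STATUS_OK
 * for the call to succeed.
 */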
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4a517e5dedf0..79da85d17b71 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
* In case of error an ERR_PTR() is returned.
*/
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
+ const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index bbcc7c073f63..0ad288ab6262 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -122,7 +122,7 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
int size;
- struct device_node *np = gc->of_node;
+ const struct device_node *np = gc->of_node;
size = of_property_count_u32_elems(np, "gpio-reserved-ranges");
if (size > 0 && size % 2 == 0)
@@ -130,7 +130,7 @@ bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
return false;
}
-static void of_gpio_flags_quirks(struct device_node *np,
+static void of_gpio_flags_quirks(const struct device_node *np,
const char *propname,
enum of_gpio_flags *flags,
int index)
@@ -236,7 +236,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
-static struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+static struct gpio_desc *of_get_named_gpiod_flags(const struct device_node *np,
const char *propname, int index, enum of_gpio_flags *flags)
{
struct of_phandle_args gpiospec;
@@ -275,7 +275,7 @@ out:
return desc;
}
-int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
+int of_get_named_gpio_flags(const struct device_node *np, const char *list_name,
int index, enum of_gpio_flags *flags)
{
struct gpio_desc *desc;
@@ -303,7 +303,7 @@ EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
*
* In case of error an ERR_PTR() is returned.
*/
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@@ -373,7 +373,7 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
enum of_gpio_flags *of_flags)
{
char prop_name[32]; /* 32 is max size of property name */
- struct device_node *np = dev->of_node;
+ const struct device_node *np = dev->of_node;
struct gpio_desc *desc;
/*
@@ -404,7 +404,7 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
unsigned int idx,
unsigned long *flags)
{
- struct device_node *np = dev->of_node;
+ const struct device_node *np = dev->of_node;
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return ERR_PTR(-ENOENT);
@@ -440,7 +440,7 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
"wlf,ldo1ena", /* WM8994 */
"wlf,ldo2ena", /* WM8994 */
};
- struct device_node *np = dev->of_node;
+ const struct device_node *np = dev->of_node;
struct gpio_desc *desc;
int i;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 27c07108496d..d1b9b721218f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -382,10 +382,18 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
if (count < 0)
return 0;
- if (count > gdev->ngpio) {
- dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d",
- count, gdev->ngpio);
- count = gdev->ngpio;
+ /*
+ * When a driver sets a nonzero offset, we assume it is registering more
+ * than one gpiochip for the same device. Stop assigning friendly names
+ * when 'gpio-line-names' has fewer entries than this chip's offset,
+ * since the property then cannot describe any of this chip's lines.
+ */
+ if (count <= chip->offset) {
+ dev_warn(&gdev->dev, "gpio-line-names too short (length %d), cannot map names for the gpiochip at offset %u\n",
+ count, chip->offset);
+ return 0;
}
names = kcalloc(count, sizeof(*names), GFP_KERNEL);
@@ -400,8 +408,22 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
return ret;
}
+ /*
+ * When more than one gpiochip per device is used, 'count' can
+ * contain at most (number of gpiochips) x chip->ngpio entries. We
+ * have to distribute all defined lines correctly, taking
+ * chip->offset as the starting point from which names are assigned
+ * out of the 'names' array. Since the 'gpio-line-names' property
+ * cannot contain gaps, make sure names are only assigned to pins
+ * that really exist, as chip->ngpio can differ from chip->offset.
+ */
+ count = (count > chip->offset) ? count - chip->offset : count;
+ if (count > chip->ngpio)
+ count = chip->ngpio;
+
for (i = 0; i < count; i++)
- gdev->descs[i].name = names[i];
+ gdev->descs[i].name = names[chip->offset + i];
kfree(names);
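
For reference, a userspace sketch (not part of the patch) of the name-distribution arithmetic the two hunks above implement: each sub-chip skips the entries consumed by earlier chips via its offset, then clamps to its own line count. The numbers are hypothetical.

    #include <stdio.h>

    static void map_names(int count, int offset, int ngpio)
    {
        if (count <= offset) {                 /* first hunk's early return */
            printf("chip at offset %d: no names mapped\n", offset);
            return;
        }
        count -= offset;       /* skip names used by earlier sub-chips */
        if (count > ngpio)
            count = ngpio;     /* never name pins this chip lacks */
        for (int i = 0; i < count; i++)
            printf("pin %d <- names[%d]\n", i, offset + i);
    }

    int main(void)
    {
        /* two hypothetical 8-line sub-chips sharing a 12-entry property */
        map_names(12, 0, 8);   /* first chip gets names[0..7]   */
        map_names(12, 8, 8);   /* second chip gets names[8..11] */
        return 0;
    }
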
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 8f53837d4d3e..97178b307ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}
-/*
- * Helper function to query RAS EEPROM address
- *
- * @adev: amdgpu_device pointer
+/**
+ * amdgpu_atomfirmware_ras_rom_addr - Get the RAS EEPROM addr from VBIOS
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
+ * the RAS EEPROM address if the function returns true
*
- * Return true if vbios supports ras rom address reporting
+ * Return true if VBIOS supports RAS EEPROM address reporting,
+ * else return false. If true and @i2c_address is not NULL,
+ * it will contain the RAS ROM address.
*/
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+ u8 *i2c_address)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
union firmware_info *firmware_info;
u8 frev, crev;
- if (i2c_address == NULL)
- return false;
-
- *i2c_address = 0;
-
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
+ firmwareinfo);
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
- index, &size, &frev, &crev, &data_offset)) {
+ index, &size, &frev, &crev,
+ &data_offset)) {
/* support firmware_info 3.4 + */
if ((frev == 3 && crev >=4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
- *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ /* The ras_rom_i2c_slave_addr should ideally
+ * be a 19-bit EEPROM address, which would be
+ * used as is by the driver; see top of
+ * amdgpu_eeprom.c.
+ *
+ * When this is the case, 0 is of course a
+ * valid RAS EEPROM address, in which case,
+ * we'll drop the first "if (firm...)" and only
+ * leave the check for the pointer.
+ *
+ * The reason this works right now is because
+ * ras_rom_i2c_slave_addr contains the EEPROM
+ * device type qualifier 1010b in the top 4
+ * bits.
+ */
+ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+ if (i2c_address)
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ return true;
+ }
}
}
- if (*i2c_address != 0)
- return true;
-
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8e5a7ac8c36f..7a7316731911 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RENOIR:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b6640291f980..f18240f87387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
/* DIMGREY_CAVEFISH */
@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index d94c5419ec25..5a6857c44bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ struct amdgpu_bo *root;
int ret;
ret = amdgpu_file_to_fpriv(f, &fpriv);
@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+ root = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!root)
+ return;
+
+ ret = amdgpu_bo_reserve(root, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(fpriv->vm.root.bo);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+
seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 14499f0de32d..8d682befe0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_stop(&ring->sched, NULL);
+
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);
@@ -611,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler) {
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
/* enable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cb07cc3b06ed..d6aa032890ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -341,21 +341,18 @@ retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
+ if (r && r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
- size, initial_domain, args->in.alignment, r);
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
}
- return r;
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 543000304a1c..675a72ef305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/
@@ -182,7 +182,7 @@ err_out:
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 23efdc672502..9b41cb8c3de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -469,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode (%s) ",
- amdgpu_ucode_name(ucode->ucode_id));
- DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ DRM_WARN("failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9dc3b2d88176..dc44c946a244 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -114,27 +114,24 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
- uint8_t ras_rom_i2c_slave_addr;
+ u8 i2c_addr;
if (!control)
return false;
- control->i2c_address = 0;
-
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_slave_addr))
- {
- switch (ras_rom_i2c_slave_addr) {
- case 0xA0:
- control->i2c_address = 0;
- return true;
- case 0xA8:
- control->i2c_address = 0x40000;
- return true;
- default:
- dev_warn(adev->dev, "RAS EEPROM I2C slave address %02x not supported",
- ras_rom_i2c_slave_addr);
- return false;
- }
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+
+ return true;
}
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2fd77c36a1ff..7b2b0980ec41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
@@ -487,7 +487,7 @@ error_sub:
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated VRAM again.
*/
@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
*
* @adev: amdgpu device pointer
- * @mem: TTM memory object
+ * @res: TTM memory object
* @offset: byte offset from the base of VRAM BO
* @length: number of bytes to export in sg_table
* @dev: the other device
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ff2307d7ee0f..23b066bcffb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
+ xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 50572635d0f8..bd3b23171579 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -37,6 +37,7 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_LOG_VF_ERROR = 200,
+ IDH_READY_TO_RESET = 201,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index ba1d3ab869c1..f50045cebd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -85,11 +85,14 @@
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
-#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
+#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define mmBIF_INTR_CNTL_ALDE 0x0101
+#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -440,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
}
return 0;
@@ -476,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 42a35d9520f9..fe9a7cc8d9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -904,14 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- /* Disable BACO support for the specific polaris12 SKU temporarily */
- if ((adev->pdev->device == 0x699F) &&
- (adev->pdev->revision == 0xC7) &&
- (adev->pdev->subsystem_vendor == 0x1028) &&
- (adev->pdev->subsystem_device == 0x0039))
- return false;
- else
- return amdgpu_dpm_is_baco_supported(adev);
+ return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 491373fcdb38..9fc8021bb0ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2484,7 +2484,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
- return -EFAULT;
+ r = -EFAULT;
+ goto out;
}
svms = &p->svms;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 816723691d51..9b1fc54555ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1200,7 +1200,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->apu_flags) {
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
mmhub_read_system_context(adev, &pa_config);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cd025c12f17b..330edd666b7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
dp_decide_training_settings(
link,
@@ -1707,7 +1707,7 @@ enum link_training_result dc_link_dp_perform_link_training(
bool skip_video_pattern)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);
@@ -1923,7 +1923,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index a612ba6dc389..ab6bc5d79012 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -28,9 +28,9 @@
*/
#include <linux/delay.h>
+#include <linux/stdarg.h>
#include "dm_services.h"
-#include <stdarg.h>
#include "dc.h"
#include "dc_dmub_srv.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dc7823d23ba8..dd38796ba30a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
vpg = dcn303_vpg_create(ctx, vpg_inst);
afmt = dcn303_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 7db268da6976..3b3721386571 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -109,7 +109,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_backlight_on;
}
@@ -119,7 +119,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_powered_on;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index fbed5304692d..63bbdf8b8678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2641,7 +2641,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7b684e7f60df..7efe9ba8706e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -39,7 +39,6 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <stdarg.h>
#include "atomfirmware.h"
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 2d55627b05b1..249cb0aeb5ae 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2005,10 +2005,10 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 715b4225f5ee..8156729c370b 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -1335,6 +1335,30 @@ enum smu_cmn2asic_mapping_type {
#define WORKLOAD_MAP(profile, workload) \
[profile] = {1, (workload)}
+/**
+ * smu_memcpy_trailing - Copy the end of one structure into the middle of another
+ *
+ * @dst: Pointer to destination struct
+ * @first_dst_member: The member name in @dst where the overwrite begins
+ * @last_dst_member: The last member in @dst that gets overwritten
+ * @src: Pointer to the source struct
+ * @first_src_member: The member name in @src where the copy begins
+ *
+ */
+#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
+ src, first_src_member) \
+({ \
+ size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
+ size_t __src_size = sizeof(*(src)) - __src_offset; \
+ size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
+ size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
+ __dst_offset; \
+ BUILD_BUG_ON(__src_size != __dst_size); \
+ __builtin_memcpy((u8 *)(dst) + __dst_offset, \
+ (u8 *)(src) + __src_offset, \
+ __dst_size); \
+})
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
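
A userspace re-derivation (not part of the patch) of what the macro computes, using toy structs and a local offsetofend(); the assert mirrors the BUILD_BUG_ON() that forces the source and destination spans to be the same size:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define offsetofend(T, m) (offsetof(T, m) + sizeof(((T *)0)->m))

    struct src_tbl { int hdr; int a; int b; int c; };         /* copy a..end */
    struct dst_tbl { int x; int a; int b; int c; int tail; }; /* fill a..c   */

    int main(void)
    {
        struct src_tbl s = { 0, 1, 2, 3 };
        struct dst_tbl d = { 9, 0, 0, 0, 9 };
        size_t src_off = offsetof(struct src_tbl, a);
        size_t src_sz  = sizeof(s) - src_off;
        size_t dst_off = offsetof(struct dst_tbl, a);
        size_t dst_sz  = offsetofend(struct dst_tbl, c) - dst_off;

        assert(src_sz == dst_sz);   /* the BUILD_BUG_ON() condition */
        memcpy((char *)&d + dst_off, (char *)&s + src_off, dst_sz);
        printf("%d %d %d %d %d\n", d.x, d.a, d.b, d.c, d.tail); /* 9 1 2 3 9 */
        return 0;
    }
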
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 465ff8d2a01a..e7803ce8f67a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/intel-family.h>
+#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static bool intel_core_rkl_chk(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
+#else
+ return false;
+#endif
+}
+
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled =
+ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 273df66cac14..e343cc218990 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -483,10 +483,8 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 6))
- memcpy(&smc_pptable->MaxVoltageStepGfx,
- &smc_dpm_table->maxvoltagestepgfx,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
-
+ smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
+ smc_dpm_table, maxvoltagestepgfx);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index f96681700c41..a5fc5d7cb6c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,16 +431,16 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
switch (smc_dpm_table->table_header.content_revision) {
case 5: /* nv10 and nv14 */
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
break;
case 7: /* nv12 */
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
- sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table_v4_7, I2cControllers);
break;
default:
dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 6eb50b05a33c..3a3421452e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1869,7 +1869,7 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index b39138041141..5aa175e12a78 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -426,7 +426,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ec8c30daf31c..ab652028e003 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -409,9 +409,8 @@ static int aldebaran_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 10))
- memcpy(&smc_pptable->GfxMaxCurrent,
- &smc_dpm_table->GfxMaxCurrent,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
+ smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
+ smc_dpm_table, GfxMaxCurrent);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0f17c2522c85..627ba2eec7fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -731,7 +731,7 @@ static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1d009494af8b..deb23dbec8b5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg;
- struct scatterlist *sge;
size_t max_segment = 0;
+ int err;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sg)
@@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
max_segment = dma_max_mapping_size(dev->dev);
if (max_segment == 0)
max_segment = UINT_MAX;
- sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sge)) {
+ err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (err) {
kfree(sg);
- sg = ERR_CAST(sge);
+ sg = ERR_PTR(err);
}
return sg;
}
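
This hunk, and the matching i915 and vmwgfx conversions further down, swap an API that multiplexed a pointer and an errno for one that returns a plain 0/-errno int. A userspace sketch (not part of the patch) of the kernel's ERR_PTR()/PTR_ERR() convention being retired here, re-created with local stand-ins:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err) { return (void *)err; }
    static long ptr_err(const void *p) { return (long)p; }
    static int is_err(const void *p)
    {
        /* errors live in the top MAX_ERRNO values of the address space */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *p = err_ptr(-12);                 /* -ENOMEM */
        if (is_err(p))
            printf("old style: err=%ld\n", ptr_err(p));

        int err = -12;                          /* new style: just an int */
        if (err)
            printf("new style: err=%d\n", err);
        return 0;
    }
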
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 111b932cf2a9..f783d4963d4b 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,7 +25,7 @@
#define DEBUG /* for pr_debug() */
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 13b217f75055..e5ae9c06510c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (is_swiotlb_active()) {
+ if (is_swiotlb_active(obj->base.dev->dev)) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 771eb2963123..35eedc14f522 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -382,7 +382,6 @@ i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- struct scatterlist *sg;
struct sg_table *st;
int ret;
@@ -393,13 +392,13 @@ static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
if (!st)
return ERR_PTR(-ENOMEM);
- sg = __sg_alloc_table_from_pages
- (st, ttm->pages, ttm->num_pages, 0,
- (unsigned long)ttm->num_pages << PAGE_SHIFT,
- i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
+ ret = sg_alloc_table_from_pages_segment(st,
+ ttm->pages, ttm->num_pages,
+ 0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
+ i915_sg_segment_size(), GFP_KERNEL);
+ if (ret) {
kfree(st);
- return ERR_CAST(sg);
+ return ERR_PTR(ret);
}
ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 468a7a617fbf..8ea0fa665e53 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
unsigned int sg_page_sizes;
- struct scatterlist *sg;
struct page **pvec;
int ret;
@@ -148,13 +147,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = obj->userptr.pvec;
alloc_table:
- sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
- num_pages << PAGE_SHIFT, max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+ num_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (ret)
goto err;
- }
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index 51dbe0e3294e..d2969f68dd64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
-#include <stddef.h>
+#include <linux/stddef.h>
struct intel_engine_cs;
struct intel_gt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ac98f8aba31..7efa386449d1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -885,7 +885,7 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int intel_vgpu_open(struct mdev_device *mdev)
+static int intel_vgpu_open_device(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
@@ -1004,7 +1004,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
vgpu->handle = 0;
}
-static void intel_vgpu_release(struct mdev_device *mdev)
+static void intel_vgpu_close_device(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1753,8 +1753,8 @@ static struct mdev_parent_ops intel_vgpu_ops = {
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
- .open = intel_vgpu_open,
- .release = intel_vgpu_release,
+ .open_device = intel_vgpu_open_device,
+ .close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
.write = intel_vgpu_write,
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index 7c903cf19c0d..e9ae22b4f813 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -124,6 +124,7 @@ static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clo
unsigned int computed;
m = n = p = s = 0;
+ delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;
for (testp = 8; testp > 0; testp /= 2) {
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index c22b07f68670..4c619307612c 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/kthread.h>
#include <linux/devcoredump.h>
-#include <stdarg.h>
#include "msm_kms.h"
#define MSM_DISP_SNAPSHOT_MAX_BLKS 10
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f4c2e46b6fe1..2ca9d9a9e5d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -276,7 +276,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = is_swiotlb_active();
+ need_swiotlb = is_swiotlb_active(dev->dev);
#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 0da5b3100ab1..dfe5f1d29763 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,25 +58,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
- /*
- * fls returns:
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
- */
-
- size = round_up(size, PAGE_SIZE);
- region_width = 10 + fls(size >> PAGE_SHIFT);
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
+ /* The size is encoded as ceil(log2(size)) - 1, which may be calculated
+ * with fls64(). The size must be clamped to hardware bounds.
+ */
+ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
+ region_width = fls64(size - 1) - 1;
region |= region_width;
/* Lock the region that needs to be updated */
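
A standalone sketch (not part of the patch) of the new width encoding: fls64(size - 1) - 1 already rounds non-power-of-two sizes up, which is why the old explicit round-up branch goes away, and the clamp enforces the hardware's minimum lock region (1 << 15):

    #include <stdint.h>
    #include <stdio.h>

    #define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)

    /* Portable stand-in for the kernel's fls64(): 1-based highest set bit. */
    static int fls64_demo(uint64_t x)
    {
        int n = 0;
        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    /* The new encoding: ceil(log2(size)) - 1, clamped to the HW minimum. */
    static unsigned int region_width(uint64_t size)
    {
        if (size < AS_LOCK_REGION_MIN_SIZE)
            size = AS_LOCK_REGION_MIN_SIZE;
        return fls64_demo(size - 1) - 1;
    }

    int main(void)
    {
        printf("%u\n", region_width(4096));             /* clamped -> 14 */
        printf("%u\n", region_width(1ULL << 16));       /* exact pow2 -> 15 */
        printf("%u\n", region_width((1ULL << 16) + 1)); /* rounds up -> 16 */
        return 0;
    }
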
@@ -87,7 +78,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
@@ -104,7 +95,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
int ret;
@@ -121,7 +112,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -137,7 +128,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -251,7 +242,7 @@ static size_t get_pgsize(u64 addr, size_t size)
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
if (mmu->as < 0)
return;
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 1940ff86e49a..6c5a11ef1ee8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -316,6 +316,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea4add2b9717..bb9e02c31946 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1160,9 +1160,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
}
if (bo->deleted) {
- ttm_bo_cleanup_refs(bo, false, false, locked);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
- return 0;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
ttm_bo_del_from_lru(bo);
@@ -1216,7 +1216,7 @@ out:
if (locked)
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- return ret;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 763fa6f4e07d..1c5ffe2935af 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *src_mem = bo->resource;
struct ttm_resource_manager *src_man =
ttm_manager_type(bdev, src_mem->mem_type);
- struct ttm_resource src_copy = *src_mem;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- src_copy = *src_mem;
- ttm_bo_move_sync_cleanup(bo, dst_mem);
if (!src_iter->ops->maps_tt)
- ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+
out_src_iter:
if (!dst_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 24031a8acd2d..d5cd8b5dc0bf 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b7dc32a0c9bb..4a1115043114 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1462,7 +1462,7 @@ static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.audio_startup = vc4_hdmi_audio_startup,
};
-struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
+static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index b0973c27e774..8b8991e3ed2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -328,7 +328,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
int ret = 0;
static size_t sgl_size;
static size_t sgt_size;
- struct scatterlist *sg;
if (vmw_tt->mapped)
return 0;
@@ -351,15 +350,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev),
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(
+ &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+ if (ret)
goto out_sg_alloc_fail;
- }
if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
uint64_t over_alloc =
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 76937f716fbe..3c33bf572d6d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -259,10 +259,11 @@ config HID_PRODIKEYS
and some additional multimedia keys.
config HID_CMEDIA
- tristate "CMedia CM6533 HID audio jack controls"
+ tristate "CMedia audio chips"
depends on HID
help
- Support for CMedia CM6533 HID audio jack controls.
+ Support for CMedia CM6533 HID audio jack controls
+ and HS100B mute buttons.
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
@@ -952,7 +953,7 @@ config HID_SONY
* Buzz controllers
* Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
- * Guitar Hero Live PS3 and Wii U guitar dongles
+ * Guitar Hero Live PS3, Wii U and PS4 guitar dongles
* Guitar Hero PS3 and PC guitar dongles
config SONY_FF
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1ea1a7c0b20f..e29efcb1c040 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -115,7 +115,6 @@ obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
-obj-$(CONFIG_HID_TMINIT) += hid-tminit.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index efb849411d25..840fd075c56f 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -17,7 +17,6 @@
#include "amd_sfh_pcie.h"
#include "amd_sfh_hid.h"
-#define AMD_SFH_IDLE_LOOP 200
struct request_list {
struct hid_device *hid;
@@ -123,14 +122,24 @@ static void amd_sfh_work_buffer(struct work_struct *work)
int i;
for (i = 0; i < cli_data->num_hid_devices; i++) {
- report_size = get_input_report(i, cli_data->sensor_idx[i], cli_data->report_id[i],
- in_data);
- hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
- in_data->input_report[i], report_size, 0);
+ if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
+ report_size = get_input_report
+ (i, cli_data->sensor_idx[i], cli_data->report_id[i], in_data);
+ hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
+ in_data->input_report[i], report_size, 0);
+ }
}
schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
}
+u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
+{
+ if (mp2->mp2_ops->response)
+ sensor_sts = mp2->mp2_ops->response(mp2, sid, sensor_sts);
+
+ return sensor_sts;
+}
+
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
{
struct amd_input_data *in_data = &privdata->in_data;
@@ -139,8 +148,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
struct device *dev;
u32 feature_report_size;
u32 input_report_size;
+ int rc, i, status;
u8 cl_idx;
- int rc, i;
dev = &privdata->pdev->dev;
@@ -155,7 +164,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
&cl_data->sensor_dma_addr[i],
GFP_KERNEL);
- cl_data->sensor_sts[i] = 0;
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
@@ -184,7 +193,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = -ENOMEM;
goto cleanup;
}
- info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
+ info.period = AMD_SFH_IDLE_LOOP;
info.sensor_idx = cl_idx;
info.dma_address = cl_data->sensor_dma_addr[i];
@@ -197,11 +206,25 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = get_report_descriptor(cl_idx, cl_data->report_descr[i]);
if (rc)
return rc;
- rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
- if (rc)
- return rc;
privdata->mp2_ops->start(privdata, info);
- cl_data->sensor_sts[i] = 1;
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+ if (status == SENSOR_ENABLED) {
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
+ if (rc) {
+ privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+ goto cleanup;
+ }
+ }
+ dev_dbg(dev, "sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
}
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;
@@ -224,10 +247,19 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
struct amdtp_cl_data *cl_data = privdata->cl_data;
struct amd_input_data *in_data = cl_data->in_data;
- int i;
+ int i, status;
- for (i = 0; i < cl_data->num_hid_devices; i++)
- privdata->mp2_ops->stop(privdata, i);
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(&privdata->pdev->dev, "stopping sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+ }
+ }
cancel_delayed_work_sync(&cl_data->work);
cancel_delayed_work_sync(&cl_data->work_buffer);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 8d68796aa905..79b138fd4261 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -31,6 +32,20 @@ static int sensor_mask_override = -1;
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
+{
+ union cmd_response cmd_resp;
+
+ /* Get response with status within a max of 800 ms timeout */
+ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+ (cmd_resp.response_v2.response == sensor_sts &&
+ cmd_resp.response_v2.status == 0 && (sid == 0xff ||
+ cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ return cmd_resp.response_v2.response;
+
+ return SENSOR_DISABLED;
+}
+
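+/*
+ * readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) re-reads the
+ * register every sleep_us until cond holds or timeout_us elapses, returning
+ * 0 on success; 500 us / 800000 us above gives the 800 ms budget noted in
+ * the comment. A userspace sketch of the same pattern (not part of the
+ * patch; the register read is a stand-in):
+ *
+ *     static int poll_for(uint32_t want, long sleep_us, long timeout_us)
+ *     {
+ *         for (long waited = 0; waited <= timeout_us; waited += sleep_us) {
+ *             if ((read_reg_stub() & 0xF) == want)
+ *                 return 0;      // condition met, like readl_poll_timeout
+ *             usleep(sleep_us);
+ *         }
+ *         return -1;             // -ETIMEDOUT in the kernel macro
+ *     }
+ */
+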
static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_base cmd_base;
@@ -183,6 +198,7 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.start = amd_start_sensor_v2,
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
+ .response = amd_sfh_wait_response_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
@@ -248,6 +264,58 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
return amd_sfh_hid_client_init(privdata);
}
+static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ info.period = AMD_SFH_IDLE_LOOP;
+ info.sensor_idx = cl_data->sensor_idx[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
+ mp2->mp2_ops->start(mp2, info);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], SENSOR_ENABLED);
+ if (status == SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ dev_dbg(dev, "resume sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX &&
+ cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], SENSOR_DISABLED);
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "suspend sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(amd_mp2_pm_ops, amd_mp2_pci_suspend,
+ amd_mp2_pci_resume);
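SIMPLE_DEV_PM_OPS() fills in only the system-sleep callbacks (suspend/resume plus their hibernation counterparts) and leaves them unset when CONFIG_PM_SLEEP is disabled, which is why both handlers above are annotated __maybe_unused.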
+
static const struct pci_device_id amd_mp2_pci_tbl[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2) },
{ }
@@ -258,6 +326,7 @@ static struct pci_driver amd_mp2_pci_driver = {
.name = DRIVER_NAME,
.id_table = amd_mp2_pci_tbl,
.probe = amd_mp2_pci_probe,
+ .driver.pm = &amd_mp2_pm_ops,
};
module_pci_driver(amd_mp2_pci_driver);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 2d5c57e3782d..1ff6f83cb6fd 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -24,14 +24,20 @@
#define AMD_C2P_MSG2 0x10508
#define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
+#define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4))
/* MP2 P2C Message Registers */
#define AMD_P2C_MSG3 0x1068C /* Supported Sensors info */
#define V2_STATUS 0x2
+#define SENSOR_ENABLED 4
+#define SENSOR_DISABLED 5
+
#define HPD_IDX 16
+#define AMD_SFH_IDLE_LOOP 200
+
/* SFH Command register */
union sfh_cmd_base {
u32 ul;
@@ -51,6 +57,19 @@ union sfh_cmd_base {
} cmd_v2;
};
+union cmd_response {
+ u32 resp;
+ struct {
+ u32 status : 2;
+ u32 out_in_c2p : 1;
+ u32 rsvd1 : 1;
+ u32 response : 4;
+ u32 sub_cmd : 8;
+ u32 sensor_id : 6;
+ u32 rsvd2 : 10;
+ } response_v2;
+};
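A hypothetical decode of one raw P2C word through this union (the register read mirrors amd_sfh_wait_response_v2() above; the mp2 and dev pointers are assumed for illustration):

	union cmd_response r;

	r.resp = readl(mp2->mmio + AMD_P2C_MSG(0));
	if (r.response_v2.status == 0 &&
	    r.response_v2.response == SENSOR_ENABLED)
		dev_dbg(dev, "sensor 0x%x enabled\n", r.response_v2.sensor_id);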
+
union sfh_cmd_param {
u32 ul;
struct {
@@ -112,10 +131,14 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata);
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id);
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata);
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata);
+u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+void amd_mp2_suspend(struct amd_mp2_dev *mp2);
+void amd_mp2_resume(struct amd_mp2_dev *mp2);
struct amd_mp2_ops {
void (*start)(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
+ int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
};
#endif
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index dc6bd4299c54..833fcf07ff35 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -187,6 +187,15 @@ static const struct apple_key_translation *apple_find_translation(
return NULL;
}
+static void input_event_with_scancode(struct input_dev *input,
+ __u8 type, __u16 code, unsigned int hid, __s32 value)
+{
+ if (type == EV_KEY &&
+ (!test_bit(code, input->key)) == value)
+ input_event(input, EV_MSC, MSC_SCAN, hid);
+ input_event(input, type, code, value);
+}
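The (!test_bit(code, input->key)) == value test fires only when the key actually changes state (a press while released, or a release while pressed), so the EV_MSC/MSC_SCAN event carrying the HID usage is emitted exactly once per transition, mirroring what the input core does for unmapped usages and letting userspace remap these translated keys by scancode.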
+
static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
struct hid_usage *usage, __s32 value)
{
@@ -199,7 +208,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, KEY_FN, value);
+ input_event_with_scancode(input, usage->type, KEY_FN,
+ usage->hid, value);
return 1;
}
@@ -240,7 +250,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
code = do_translate ? trans->to : trans->from;
}
- input_event(input, usage->type, code, value);
+ input_event_with_scancode(input, usage->type, code,
+ usage->hid, value);
return 1;
}
@@ -258,8 +269,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
clear_bit(usage->code,
asc->pressed_numlock);
- input_event(input, usage->type, trans->to,
- value);
+ input_event_with_scancode(input, usage->type,
+ trans->to, usage->hid, value);
}
return 1;
@@ -270,7 +281,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (hid->country == HID_COUNTRY_INTERNATIONAL_ISO) {
trans = apple_find_translation(apple_iso_keyboard, usage->code);
if (trans) {
- input_event(input, usage->type, trans->to, value);
+ input_event_with_scancode(input, usage->type,
+ trans->to, usage->hid, value);
return 1;
}
}
@@ -279,7 +291,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (swap_opt_cmd) {
trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
if (trans) {
- input_event(input, usage->type, trans->to, value);
+ input_event_with_scancode(input, usage->type,
+ trans->to, usage->hid, value);
return 1;
}
}
@@ -287,7 +300,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (swap_fn_leftctrl) {
trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
if (trans) {
- input_event(input, usage->type, trans->to, value);
+ input_event_with_scancode(input, usage->type,
+ trans->to, usage->hid, value);
return 1;
}
}
@@ -306,8 +320,8 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
if ((asc->quirks & APPLE_INVERT_HWHEEL) &&
usage->code == REL_HWHEEL) {
- input_event(field->hidinput->input, usage->type, usage->code,
- -value);
+ input_event_with_scancode(field->hidinput->input, usage->type,
+ usage->code, usage->hid, -value);
return 1;
}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index fb807c8e989b..f3ecddc519ee 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -82,6 +82,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_T90CHI BIT(9)
#define QUIRK_MEDION_E1239T BIT(10)
#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
+#define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -366,6 +367,17 @@ static int asus_raw_event(struct hid_device *hdev,
}
+ if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
+		/*
+		 * The CLAYMORE II keyboard sends this packet when it goes to
+		 * sleep; this causes the whole system to go into suspend, so
+		 * drop it here.
+		 */
+
+		if (size == 2 && data[0] == 0x02 && data[1] == 0x00)
+			return -1;
+ }
+
return 0;
}
@@ -1226,6 +1238,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
+ QUIRK_ROG_CLAYMORE_II_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
diff --git a/drivers/hid/hid-cmedia.c b/drivers/hid/hid-cmedia.c
index 3296c5050264..cab42047bc99 100644
--- a/drivers/hid/hid-cmedia.c
+++ b/drivers/hid/hid-cmedia.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* HID driver for CMedia CM6533 audio jack controls
+ * and HS100B mute buttons
*
* Copyright (C) 2015 Ben Chen <ben_chen@bizlinktech.com>
+ * Copyright (C) 2021 Thomas Weißschuh <linux@weissschuh.net>
*/
#include <linux/device.h>
@@ -11,13 +13,53 @@
#include "hid-ids.h"
MODULE_AUTHOR("Ben Chen");
-MODULE_DESCRIPTION("CM6533 HID jack controls");
+MODULE_AUTHOR("Thomas Weißschuh");
+MODULE_DESCRIPTION("CM6533 HID jack controls and HS100B mute button");
MODULE_LICENSE("GPL");
#define CM6533_JD_TYPE_COUNT 1
#define CM6533_JD_RAWEV_LEN 16
#define CM6533_JD_SFX_OFFSET 8
+#define HS100B_RDESC_ORIG_SIZE 60
+
+/* Fixed report descriptor of HS-100B audio chip
+ * Bit 4 is an absolute Microphone mute usage instead of being unassigned.
+ */
+static __u8 hs100b_rdesc_fixed[] = {
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x01, /* Usage (Consumer Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0xE9, /* Usage (Volume Inc), */
+ 0x09, 0xEA, /* Usage (Volume Dec), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0xE2, /* Usage (Mute), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x05, 0x0B, /* Usage Page (Telephony), */
+ 0x09, 0x2F, /* Usage (2Fh), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x00, /* Usage (00h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x09, 0x00, /* Usage (00h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x00, /* Usage (00h), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0xC0 /* End Collection */
+};
+
/*
*
*CM6533 audio jack HID raw events:
@@ -156,5 +198,49 @@ static struct hid_driver cmhid_driver = {
.remove = cmhid_remove,
.input_mapping = cmhid_input_mapping,
};
-module_hid_driver(cmhid_driver);
+static __u8 *cmhid_hs100b_report_fixup(struct hid_device *hid, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == HS100B_RDESC_ORIG_SIZE) {
+ hid_info(hid, "Fixing CMedia HS-100B report descriptor\n");
+ rdesc = hs100b_rdesc_fixed;
+ *rsize = sizeof(hs100b_rdesc_fixed);
+ }
+ return rdesc;
+}
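HID core calls a driver's report_fixup hook from hid_open_report() before the descriptor is parsed, so returning the static replacement above and updating *rsize is sufficient to substitute the corrected descriptor whenever a 60-byte original is detected.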
+
+static const struct hid_device_id cmhid_hs100b_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CMEDIA_HS100B) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cmhid_hs100b_devices);
+
+static struct hid_driver cmhid_hs100b_driver = {
+ .name = "cmedia_hs100b",
+ .id_table = cmhid_hs100b_devices,
+ .report_fixup = cmhid_hs100b_report_fixup,
+};
+
+static int cmedia_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&cmhid_driver);
+ if (ret)
+ return ret;
+
+ ret = hid_register_driver(&cmhid_hs100b_driver);
+ if (ret)
+ hid_unregister_driver(&cmhid_driver);
+
+ return ret;
+}
+module_init(cmedia_init);
+
+static void cmedia_exit(void)
+{
+ hid_unregister_driver(&cmhid_driver);
+ hid_unregister_driver(&cmhid_hs100b_driver);
+}
+module_exit(cmedia_exit);
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0d22713a3874..383dfda8c12f 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -228,13 +228,15 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct elo_priv *priv;
int ret;
+ struct usb_device *udev;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
INIT_DELAYED_WORK(&priv->work, elo_work);
- priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
+ udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
+ priv->usbdev = usb_get_dev(udev);
hid_set_drvdata(hdev, priv);
@@ -265,6 +267,8 @@ static void elo_remove(struct hid_device *hdev)
{
struct elo_priv *priv = hid_get_drvdata(hdev);
+ usb_put_dev(priv->usbdev);
+
hid_hw_stop(hdev);
cancel_delayed_work_sync(&priv->work);
kfree(priv);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8f1893e68112..29564b370341 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -41,9 +41,6 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
-#define USB_VENDOR_ID_ACTIVISION 0x1430
-#define USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE 0x474c
-
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
@@ -197,6 +194,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
+#define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
#define USB_VENDOR_ID_ATEN 0x0557
@@ -292,6 +290,7 @@
#define USB_VENDOR_ID_CMEDIA 0x0d8c
#define USB_DEVICE_ID_CM109 0x000e
+#define USB_DEVICE_ID_CMEDIA_HS100B 0x0014
#define USB_DEVICE_ID_CM6533 0x0022
#define USB_VENDOR_ID_CODEMERCS 0x07c0
@@ -1018,6 +1017,10 @@
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
+#define USB_VENDOR_ID_REDOCTANE 0x1430
+#define USB_DEVICE_ID_REDOCTANE_GUITAR_DONGLE 0x474c
+#define USB_DEVICE_ID_REDOCTANE_PS4_GHLIVE_DONGLE 0x07bb
+
#define USB_VENDOR_ID_RETROUSB 0xf000
#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4286a51f7f16..4b5ebeacd283 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -419,8 +419,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
if (dev->battery_status == HID_BATTERY_UNKNOWN)
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- else if (dev->battery_capacity == 100)
- val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 61635e629469..81de88ab2ecc 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1331,6 +1331,43 @@ static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
return 0;
}
+static int hidpp20_map_battery_capacity(struct hid_device *hid_dev, int voltage)
+{
+ /* NB: This voltage curve doesn't necessarily map perfectly to all
+ * devices that implement the BATTERY_VOLTAGE feature. This is because
+ * there are a few devices that use different battery technology.
+ */
+
+ static const int voltages[] = {
+ 4186, 4156, 4143, 4133, 4122, 4113, 4103, 4094, 4086, 4075,
+ 4067, 4059, 4051, 4043, 4035, 4027, 4019, 4011, 4003, 3997,
+ 3989, 3983, 3976, 3969, 3961, 3955, 3949, 3942, 3935, 3929,
+ 3922, 3916, 3909, 3902, 3896, 3890, 3883, 3877, 3870, 3865,
+ 3859, 3853, 3848, 3842, 3837, 3833, 3828, 3824, 3819, 3815,
+ 3811, 3808, 3804, 3800, 3797, 3793, 3790, 3787, 3784, 3781,
+ 3778, 3775, 3772, 3770, 3767, 3764, 3762, 3759, 3757, 3754,
+ 3751, 3748, 3744, 3741, 3737, 3734, 3730, 3726, 3724, 3720,
+ 3717, 3714, 3710, 3706, 3702, 3697, 3693, 3688, 3683, 3677,
+ 3671, 3666, 3662, 3658, 3654, 3646, 3633, 3612, 3579, 3537
+ };
+
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(voltages) != 100);
+
+ if (unlikely(voltage < 3500 || voltage >= 5000))
+ hid_warn_once(hid_dev,
+ "%s: possibly using the wrong voltage curve\n",
+ __func__);
+
+ for (i = 0; i < ARRAY_SIZE(voltages); i++) {
+ if (voltage >= voltages[i])
+ return ARRAY_SIZE(voltages) - i;
+ }
+
+ return 0;
+}
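As a worked example of the lookup: a reading of 3800 mV first satisfies voltage >= voltages[i] at i = 53 (the 3800 entry), so the helper returns 100 - 53 = 47, i.e. 47% capacity. Anything at or above 4186 mV reports 100%, and a value below the final 3537 entry falls out of the loop and reports 0%.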
+
static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
{
u8 feature_type;
@@ -1354,6 +1391,8 @@ static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
hidpp->battery.status = status;
hidpp->battery.voltage = voltage;
+ hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev,
+ voltage);
hidpp->battery.level = level;
hidpp->battery.charge_type = charge_type;
hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -1378,6 +1417,8 @@ static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
hidpp->battery.voltage = voltage;
+ hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev,
+ voltage);
hidpp->battery.status = status;
hidpp->battery.level = level;
hidpp->battery.charge_type = charge_type;
@@ -2240,11 +2281,10 @@ static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id
wd->size = size;
memcpy(wd->params, params, size);
- atomic_inc(&data->workqueue_size);
+ s = atomic_inc_return(&data->workqueue_size);
queue_work(data->wq, &wd->work);
/* warn about excessive queue size */
- s = atomic_read(&data->workqueue_size);
if (s >= 20 && s % 20 == 0)
hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s);
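Folding the increment and the read into a single atomic_inc_return() makes the threshold test see the depth produced by this thread's own enqueue; with the previous separate atomic_read(), a concurrent increment could slip in between and let two submitters observe the same size, or step over the s % 20 boundary without warning.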
@@ -3717,7 +3757,8 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE ||
- hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE)
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE ||
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 8bcaee4ccae0..686788ebf3e1 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -68,6 +68,10 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TOUCH_STATE_START 0x30
#define TOUCH_STATE_DRAG 0x40
+/* Number of high-resolution events for each low-resolution detent. */
+#define SCROLL_HR_STEPS 10
+#define SCROLL_HR_MULT (120 / SCROLL_HR_STEPS)
+#define SCROLL_HR_THRESHOLD 90 /* units */
#define SCROLL_ACCEL_DEFAULT 7
/* Touch surface information. Dimension is in hundredths of a mm, min and max
@@ -126,7 +130,11 @@ struct magicmouse_sc {
short y;
short scroll_x;
short scroll_y;
+ short scroll_x_hr;
+ short scroll_y_hr;
u8 size;
+ bool scroll_x_active;
+ bool scroll_y_active;
} touches[16];
int tracking_ids[16];
@@ -248,12 +256,20 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
+ int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
+ SCROLL_HR_STEPS;
+ int step_x_hr = msc->touches[id].scroll_x_hr - x;
+ int step_y_hr = msc->touches[id].scroll_y_hr - y;
/* Calculate and apply the scroll motion. */
switch (state) {
case TOUCH_STATE_START:
msc->touches[id].scroll_x = x;
msc->touches[id].scroll_y = y;
+ msc->touches[id].scroll_x_hr = x;
+ msc->touches[id].scroll_y_hr = y;
+ msc->touches[id].scroll_x_active = false;
+ msc->touches[id].scroll_y_active = false;
/* Reset acceleration after half a second. */
if (scroll_acceleration && time_before(now,
@@ -280,6 +296,40 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
msc->scroll_jiffies = now;
input_report_rel(input, REL_WHEEL, step_y);
}
+
+ if (!msc->touches[id].scroll_x_active &&
+ abs(step_x_hr) > SCROLL_HR_THRESHOLD) {
+ msc->touches[id].scroll_x_active = true;
+ msc->touches[id].scroll_x_hr = x;
+ step_x_hr = 0;
+ }
+
+ step_x_hr /= step_hr;
+ if (step_x_hr != 0 &&
+ msc->touches[id].scroll_x_active) {
+ msc->touches[id].scroll_x_hr -= step_x_hr *
+ step_hr;
+ input_report_rel(input,
+ REL_HWHEEL_HI_RES,
+ -step_x_hr * SCROLL_HR_MULT);
+ }
+
+ if (!msc->touches[id].scroll_y_active &&
+ abs(step_y_hr) > SCROLL_HR_THRESHOLD) {
+ msc->touches[id].scroll_y_active = true;
+ msc->touches[id].scroll_y_hr = y;
+ step_y_hr = 0;
+ }
+
+ step_y_hr /= step_hr;
+ if (step_y_hr != 0 &&
+ msc->touches[id].scroll_y_active) {
+ msc->touches[id].scroll_y_hr -= step_y_hr *
+ step_hr;
+ input_report_rel(input,
+ REL_WHEEL_HI_RES,
+ step_y_hr * SCROLL_HR_MULT);
+ }
break;
}
}
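The constants map the touch surface onto the input core's convention of 120 units per wheel detent: SCROLL_HR_MULT is 120 / 10 = 12, so each REL_WHEEL_HI_RES event is worth 12 units and SCROLL_HR_STEPS of them add up to one legacy detent. As a worked example with the default scroll_accel of 7 and an assumed mid-range scroll_speed of 24, step_hr = ((64 - 24) * 7) / 10 = 28, i.e. one hi-res event per 28 raw units of finger travel.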
@@ -481,6 +531,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
if (emulate_scroll_wheel) {
__set_bit(REL_WHEEL, input->relbit);
__set_bit(REL_HWHEEL, input->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input->relbit);
}
} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
/* setting the device name to ensure the same driver settings
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 51b39bda9a9d..2e104682c22b 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -662,8 +662,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TMINIT)
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65d) },
#endif
#if IS_ENABLED(CONFIG_HID_TIVO)
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b3722c51ec78..d1b107d547f5 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -11,8 +11,9 @@
* Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
* Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
* Copyright (c) 2018 Todd Kelner
- * Copyright (c) 2020 Pascal Giard <pascal.giard@etsmtl.ca>
+ * Copyright (c) 2020-2021 Pascal Giard <pascal.giard@etsmtl.ca>
* Copyright (c) 2020 Sanjay Govind <sanjay.govind9@gmail.com>
+ * Copyright (c) 2021 Daniel Nguyen <daniel.nguyen.1@ens.etsmtl.ca>
*/
/*
@@ -62,6 +63,7 @@
#define SHANWAN_GAMEPAD BIT(16)
#define GH_GUITAR_CONTROLLER BIT(17)
#define GHL_GUITAR_PS3WIIU BIT(18)
+#define GHL_GUITAR_PS4 BIT(19)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -85,18 +87,27 @@
#define NSG_MRXU_MAX_X 1667
#define NSG_MRXU_MAX_Y 1868
-#define GHL_GUITAR_POKE_INTERVAL 10 /* In seconds */
+/* The PS3/Wii U dongles require a poke every 10 seconds, but the PS4
+ * requires one every 8 seconds. Using 8 seconds for all for simplicity.
+ */
+#define GHL_GUITAR_POKE_INTERVAL 8 /* In seconds */
#define GUITAR_TILT_USAGE 44
-/* Magic value and data taken from GHLtarUtility:
+/* Magic data taken from GHLtarUtility:
* https://github.com/ghlre/GHLtarUtility/blob/master/PS3Guitar.cs
* Note: The Wii U and PS3 dongles happen to share the same!
*/
-static const u16 ghl_ps3wiiu_magic_value = 0x201;
static const char ghl_ps3wiiu_magic_data[] = {
0x02, 0x08, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00
};
+/* Magic data for the PS4 dongles sniffed with a USB protocol
+ * analyzer.
+ */
+static const char ghl_ps4_magic_data[] = {
+ 0x30, 0x02, 0x08, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
/* PS/3 Motion controller */
static u8 motion_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
@@ -642,14 +653,14 @@ static void ghl_magic_poke(struct timer_list *t)
hid_err(sc->hdev, "usb_submit_urb failed: %d", ret);
}
-static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev)
+static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev,
+ const char ghl_magic_data[], u16 poke_size)
{
struct usb_ctrlrequest *cr;
- u16 poke_size;
u8 *databuf;
unsigned int pipe;
+ u16 ghl_magic_value = (((HID_OUTPUT_REPORT + 1) << 8) | ghl_magic_data[0]);
- poke_size = ARRAY_SIZE(ghl_ps3wiiu_magic_data);
pipe = usb_sndctrlpipe(usbdev, 0);
cr = devm_kzalloc(&sc->hdev->dev, sizeof(*cr), GFP_ATOMIC);
@@ -663,10 +674,10 @@ static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev)
cr->bRequestType =
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT;
cr->bRequest = USB_REQ_SET_CONFIGURATION;
- cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
+ cr->wValue = cpu_to_le16(ghl_magic_value);
cr->wIndex = 0;
cr->wLength = cpu_to_le16(poke_size);
- memcpy(databuf, ghl_ps3wiiu_magic_data, poke_size);
+ memcpy(databuf, ghl_magic_data, poke_size);
usb_fill_control_urb(
sc->ghl_urb, usbdev, pipe,
(unsigned char *) cr, databuf, poke_size,
@@ -2974,7 +2985,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!strcmp(hdev->name, "FutureMax Dance Mat"))
quirks |= FUTUREMAX_DANCE_MAT;
- if (!strcmp(hdev->name, "SHANWAN PS3 GamePad"))
+ if (!strcmp(hdev->name, "SHANWAN PS3 GamePad") ||
+ !strcmp(hdev->name, "ShanWan PS(R) Ga`epad"))
quirks |= SHANWAN_GAMEPAD;
sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
@@ -3030,11 +3042,17 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENODEV;
}
- if (sc->quirks & GHL_GUITAR_PS3WIIU) {
+ if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!sc->ghl_urb)
return -ENOMEM;
- ret = ghl_init_urb(sc, usbdev);
+
+ if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
+ ARRAY_SIZE(ghl_ps3wiiu_magic_data));
+ else if (sc->quirks & GHL_GUITAR_PS4)
+ ret = ghl_init_urb(sc, usbdev, ghl_ps4_magic_data,
+ ARRAY_SIZE(ghl_ps4_magic_data));
if (ret) {
hid_err(hdev, "error preparing URB\n");
return ret;
@@ -3052,7 +3070,7 @@ static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if (sc->quirks & GHL_GUITAR_PS3WIIU) {
+ if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
del_timer_sync(&sc->ghl_poke_timer);
usb_free_urb(sc->ghl_urb);
}
@@ -3172,11 +3190,14 @@ static const struct hid_device_id sony_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
.driver_data = GHL_GUITAR_PS3WIIU | GH_GUITAR_CONTROLLER },
/* Guitar Hero PC Guitar Dongle */
- { HID_USB_DEVICE(USB_VENDOR_ID_ACTIVISION, USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE),
+ { HID_USB_DEVICE(USB_VENDOR_ID_REDOCTANE, USB_DEVICE_ID_REDOCTANE_GUITAR_DONGLE),
.driver_data = GH_GUITAR_CONTROLLER },
/* Guitar Hero PS3 World Tour Guitar Dongle */
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE),
.driver_data = GH_GUITAR_CONTROLLER },
+ /* Guitar Hero Live PS4 guitar dongles */
+ { HID_USB_DEVICE(USB_VENDOR_ID_REDOCTANE, USB_DEVICE_ID_REDOCTANE_PS4_GHLIVE_DONGLE),
+ .driver_data = GHL_GUITAR_PS4 | GH_GUITAR_CONTROLLER },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index cdc7d82ae9ed..d44550aa8805 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -173,6 +173,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
if (ret) {
hid_err(hdev, "setup data couldn't be sent\n");
+ kfree(send_buf);
return;
}
}
@@ -253,6 +254,7 @@ static void thrustmaster_remove(struct hid_device *hdev)
usb_kill_urb(tm_wheel->urb);
+ kfree(tm_wheel->change_request);
kfree(tm_wheel->response);
kfree(tm_wheel->model_request);
usb_free_urb(tm_wheel->urb);
@@ -336,11 +338,14 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
);
ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
- if (ret)
+ if (ret) {
hid_err(hdev, "Error %d while submitting the URB. I am unable to initialize this wheel...\n", ret);
+ goto error6;
+ }
return ret;
+error6: kfree(tm_wheel->change_request);
error5: kfree(tm_wheel->response);
error4: kfree(tm_wheel->model_request);
error3: usb_free_urb(tm_wheel->urb);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 46474612e73c..517141138b00 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -171,8 +171,6 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
- { USB_VENDOR_ID_ELAN, HID_ANY_ID,
- I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
{ I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
@@ -183,7 +181,8 @@ static const struct i2c_hid_quirks {
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
- I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
+ I2C_HID_QUIRK_BOGUS_IRQ },
{ 0, 0 }
};
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index ee0225982a82..52674149a275 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -26,28 +26,29 @@ struct i2c_hid_of_goodix {
struct i2chid_ops ops;
struct regulator *vdd;
+ struct notifier_block nb;
+ struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
-static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
+static void goodix_i2c_hid_deassert_reset(struct i2c_hid_of_goodix *ihid_goodix,
+ bool regulator_just_turned_on)
{
- struct i2c_hid_of_goodix *ihid_goodix =
- container_of(ops, struct i2c_hid_of_goodix, ops);
- int ret;
-
- ret = regulator_enable(ihid_goodix->vdd);
- if (ret)
- return ret;
-
- if (ihid_goodix->timings->post_power_delay_ms)
+ if (regulator_just_turned_on && ihid_goodix->timings->post_power_delay_ms)
msleep(ihid_goodix->timings->post_power_delay_ms);
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 0);
if (ihid_goodix->timings->post_gpio_reset_delay_ms)
msleep(ihid_goodix->timings->post_gpio_reset_delay_ms);
+}
- return 0;
+static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+
+ return regulator_enable(ihid_goodix->vdd);
}
static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
@@ -55,20 +56,54 @@ static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
struct i2c_hid_of_goodix *ihid_goodix =
container_of(ops, struct i2c_hid_of_goodix, ops);
- gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
regulator_disable(ihid_goodix->vdd);
}
+static int ihid_goodix_vdd_notify(struct notifier_block *nb,
+ unsigned long event,
+ void *ignored)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(nb, struct i2c_hid_of_goodix, nb);
+ int ret = NOTIFY_OK;
+
+ mutex_lock(&ihid_goodix->regulator_mutex);
+
+ switch (event) {
+ case REGULATOR_EVENT_PRE_DISABLE:
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
+ break;
+
+ case REGULATOR_EVENT_ENABLE:
+ goodix_i2c_hid_deassert_reset(ihid_goodix, true);
+ break;
+
+ case REGULATOR_EVENT_ABORT_DISABLE:
+ goodix_i2c_hid_deassert_reset(ihid_goodix, false);
+ break;
+
+ default:
+ ret = NOTIFY_DONE;
+ break;
+ }
+
+ mutex_unlock(&ihid_goodix->regulator_mutex);
+
+ return ret;
+}
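Note the asymmetry between the two deassert paths: REGULATOR_EVENT_ENABLE passes regulator_just_turned_on = true so the full post_power_delay_ms settling time is honoured, while REGULATOR_EVENT_ABORT_DISABLE passes false because the supply never actually dropped and only the post-reset delay is required.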
+
static int i2c_hid_of_goodix_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_hid_of_goodix *ihid_goodix;
-
+ int ret;
ihid_goodix = devm_kzalloc(&client->dev, sizeof(*ihid_goodix),
GFP_KERNEL);
if (!ihid_goodix)
return -ENOMEM;
+ mutex_init(&ihid_goodix->regulator_mutex);
+
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
@@ -84,6 +119,37 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
ihid_goodix->timings = device_get_match_data(&client->dev);
+ /*
+ * We need to control the "reset" line in lockstep with the regulator
+	 * actually turning on and off instead of just when we make the request.
+ * This matters if the regulator is shared with another consumer.
+ * - If the regulator is off then we must assert reset. The reset
+ * line is active low and on some boards it could cause a current
+ * leak if left high.
+ * - If the regulator is on then we don't want reset asserted for very
+ * long. Holding the controller in reset apparently draws extra
+ * power.
+ */
+ mutex_lock(&ihid_goodix->regulator_mutex);
+ ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
+ ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
+ if (ret) {
+ mutex_unlock(&ihid_goodix->regulator_mutex);
+ return dev_err_probe(&client->dev, ret,
+ "regulator notifier request failed\n");
+ }
+
+ /*
+ * If someone else is holding the regulator on (or the regulator is
+ * an always-on one) we might never be told to deassert reset. Do it
+ * now. Here we'll assume that someone else might have _just
+ * barely_ turned the regulator on so we'll do the full
+ * "post_power_delay" just in case.
+ */
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ goodix_i2c_hid_deassert_reset(ihid_goodix, true);
+ mutex_unlock(&ihid_goodix->regulator_mutex);
+
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 06130dc431a0..2dcaf31eb9cd 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -377,27 +377,23 @@ static int hid_submit_ctrl(struct hid_device *hid)
len = hid_report_len(report);
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
- usbhid->urbctrl->transfer_buffer_length = len;
if (raw_report) {
memcpy(usbhid->ctrlbuf, raw_report, len);
kfree(raw_report);
usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
}
} else {
- int maxpacket, padlen;
+ int maxpacket;
usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
usbhid->urbctrl->pipe, 0);
- if (maxpacket > 0) {
- padlen = DIV_ROUND_UP(len, maxpacket);
- padlen *= maxpacket;
- if (padlen > usbhid->bufsize)
- padlen = usbhid->bufsize;
- } else
- padlen = 0;
- usbhid->urbctrl->transfer_buffer_length = padlen;
+ len += (len == 0); /* Don't allow 0-length reports */
+ len = round_up(len, maxpacket);
+ if (len > usbhid->bufsize)
+ len = usbhid->bufsize;
}
+ usbhid->urbctrl->transfer_buffer_length = len;
usbhid->urbctrl->dev = hid_to_usb_dev(hid);
usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
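For the IN direction this replaces the DIV_ROUND_UP()-based padding with an equivalent but clearer sequence: a zero-length report first becomes one byte, the length is then rounded up to a whole max packet (for example, len = 17 with maxpacket = 8 becomes 24), and the result is clamped to usbhid->bufsize so the URB can never overrun the control buffer.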
@@ -505,7 +501,7 @@ static void hid_ctrl(struct urb *urb)
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
- } else {
+ } else if (usbhid->ctrlhead != usbhid->ctrltail) {
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
if (usbhid->ctrlhead != usbhid->ctrltail &&
@@ -1223,9 +1219,20 @@ static void usbhid_stop(struct hid_device *hid)
mutex_lock(&usbhid->mutex);
clear_bit(HID_STARTED, &usbhid->iofl);
+
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
+ while (usbhid->ctrltail != usbhid->ctrlhead) {
+ if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) {
+ kfree(usbhid->ctrl[usbhid->ctrltail].raw_report);
+ usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
+ }
+
+ usbhid->ctrltail = (usbhid->ctrltail + 1) &
+ (HID_CONTROL_FIFO_SIZE - 1);
+ }
spin_unlock_irq(&usbhid->lock);
+
usb_kill_urb(usbhid->urbin);
usb_kill_urb(usbhid->urbout);
usb_kill_urb(usbhid->urbctrl);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 57bfa0ae9836..93f49b766376 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2287,7 +2287,13 @@ static void wacom_set_shared_values(struct wacom_wac *wacom_wac)
if (wacom_wac->has_mute_touch_switch) {
wacom_wac->shared->has_mute_touch_switch = true;
- wacom_wac->shared->is_touch_on = true;
+ /* Hardware touch switch may be off. Wait until
+ * we know the switch state to decide is_touch_on.
+ * Softkey state should be initialized to "on" to
+	 * match the historical default.
+ */
+ if (wacom_wac->is_soft_touch_switch)
+ wacom_wac->shared->is_touch_on = true;
}
if (wacom_wac->shared->has_mute_touch_switch &&
@@ -2791,6 +2797,7 @@ static int wacom_probe(struct hid_device *hdev,
error);
}
+ wacom_wac->probe_complete = true;
return 0;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 81ba642adcb7..fd51769d0994 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -824,6 +824,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 0;
}
+static inline bool touch_is_muted(struct wacom_wac *wacom_wac)
+{
+ return wacom_wac->probe_complete &&
+ wacom_wac->shared->has_mute_touch_switch &&
+ !wacom_wac->shared->is_touch_on;
+}
+
static inline bool report_touch_events(struct wacom_wac *wacom)
{
return (touch_arbitration ? !wacom->shared->stylus_in_proximity : 1);
@@ -1525,11 +1532,8 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
int y_offset = 2;
- if (wacom->shared->has_mute_touch_switch &&
- !wacom->shared->is_touch_on) {
- if (!wacom->shared->touch_down)
- return 0;
- }
+ if (touch_is_muted(wacom) && !wacom->shared->touch_down)
+ return 0;
if (wacom->features.type == WACOM_27QHDT) {
current_num_contacts = data[63];
@@ -1987,14 +1991,17 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->numbered_buttons++;
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
- case WACOM_HID_WD_TOUCHONOFF:
case WACOM_HID_WD_MUTE_DEVICE:
+ /* softkey touch switch */
+ wacom_wac->is_soft_touch_switch = true;
+ fallthrough;
+ case WACOM_HID_WD_TOUCHONOFF:
/*
- * This usage, which is used to mute touch events, comes
- * from the pad packet, but is reported on the touch
+ * These two usages, which are used to mute touch events, come
+ * from the pad packet, but are reported on the touch
* interface. Because the touch interface may not have
* been created yet, we cannot call wacom_map_usage(). In
- * order to process this usage when we receive it, we set
+ * order to process the usages when we receive them, we set
* the usage type and code directly.
*/
wacom_wac->has_mute_touch_switch = true;
@@ -2533,8 +2540,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
bool prox = hid_data->tipswitch &&
report_touch_events(wacom_wac);
- if (wacom_wac->shared->has_mute_touch_switch &&
- !wacom_wac->shared->is_touch_on) {
+ if (touch_is_muted(wacom_wac)) {
if (!wacom_wac->shared->touch_down)
return;
prox = false;
@@ -2548,8 +2554,17 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
int slot;
slot = input_mt_get_slot_by_key(input, hid_data->id);
- if (slot < 0)
+ if (slot < 0) {
return;
+ } else {
+ struct input_mt_slot *ps = &input->mt->slots[slot];
+ int mt_id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (!prox && mt_id < 0) {
+				/* No data to send for this slot; short-circuit */
+ return;
+ }
+ }
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
@@ -2581,6 +2596,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
struct wacom_features *features = &wacom->wacom_wac.features;
+ if (touch_is_muted(wacom_wac) && !wacom_wac->shared->touch_down)
+ return;
+
if (wacom_wac->is_invalid_bt_frame)
return;
@@ -2630,6 +2648,9 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
struct hid_data* hid_data = &wacom_wac->hid_data;
int i;
+ if (touch_is_muted(wacom_wac) && !wacom_wac->shared->touch_down)
+ return;
+
wacom_wac->is_invalid_bt_frame = false;
for (i = 0; i < report->maxfield; i++) {
@@ -2681,6 +2702,10 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ /* if there was nothing to process, don't send an empty sync */
+ if (wacom_wac->hid_data.num_expected == 0)
+ return;
+
/* If more packets of data are expected, give us a chance to
* process them rather than immediately syncing a partial
* update.
@@ -3835,6 +3860,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
wacom_wac->has_mute_touch_switch = true;
+ wacom_wac->is_soft_touch_switch = true;
}
fallthrough;
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8f16654eca09..8b2d4e5b2303 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -337,6 +337,7 @@ struct wacom_wac {
int tool[2];
int id[2];
__u64 serial[2];
+ bool probe_complete;
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
@@ -352,6 +353,7 @@ struct wacom_wac {
int mode_value;
struct hid_data hid_data;
bool has_mute_touch_switch;
+ bool is_soft_touch_switch;
bool has_mode_change;
bool is_direct_mode;
bool is_invalid_bt_frame;
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 18da5a25e89a..868243dba1ee 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -17,6 +17,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/units.h>
/* PVT Common register */
#define PVT_IP_CONFIG 0x04
@@ -37,7 +38,6 @@
#define CLK_SYNTH_EN BIT(24)
#define CLK_SYS_CYCLES_MAX 514
#define CLK_SYS_CYCLES_MIN 2
-#define HZ_PER_MHZ 1000000L
#define SDIF_DISABLE 0x04
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 6f0aa0ed3241..aaeeacc12121 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -69,6 +69,38 @@ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
}
EXPORT_SYMBOL_GPL(i2c_acpi_get_i2c_resource);
+static int i2c_acpi_resource_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+ int *count = data;
+
+ if (i2c_acpi_get_i2c_resource(ares, &sb))
+ *count = *count + 1;
+
+ return 1;
+}
+
+/**
+ * i2c_acpi_client_count - Count the number of I2cSerialBus resources
+ * @adev: ACPI device
+ *
+ * Returns the number of I2cSerialBus resources in the ACPI-device's
+ * resource-list; or a negative error code.
+ */
+int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ int ret, count = 0;
+ LIST_HEAD(r);
+
+ ret = acpi_dev_get_resources(adev, &r, i2c_acpi_resource_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+ return count;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_client_count);
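A hypothetical caller sketch (the surrounding client pointer and dev_info() usage are assumptions for illustration): a driver can use the count to decide whether an ACPI node describes one I2C client or several:

	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
	int n = i2c_acpi_client_count(adev);

	if (n < 0)
		return n;	/* walking the resource list failed */
	if (n > 1)
		dev_info(&client->dev, "%d I2cSerialBus resources\n", n);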
+
static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
{
struct i2c_acpi_lookup *lookup = data;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 043f199e7bc6..9b279937a24e 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -6,12 +6,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/units.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#define HZ_PER_MHZ 1000000L
-
static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 7b32dfaee9b3..3ba2378df3dd 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
-
-#define HZ_PER_KHZ 1000
+#include <linux/units.h>
#define AS73211_DRV_NAME "as73211"
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index c9e9fc81447e..0c98dd3dee67 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1429,7 +1429,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
static int config_non_roce_gid_cache(struct ib_device *device,
- u32 port, int gid_tbl_len)
+ u32 port, struct ib_port_attr *tprops)
{
struct ib_gid_attr gid_attr = {};
struct ib_gid_table *table;
@@ -1441,7 +1441,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
table = rdma_gid_table(device, port);
mutex_lock(&table->lock);
- for (i = 0; i < gid_tbl_len; ++i) {
+ for (i = 0; i < tprops->gid_tbl_len; ++i) {
if (!device->ops.query_gid)
continue;
ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
@@ -1452,6 +1452,8 @@ static int config_non_roce_gid_cache(struct ib_device *device,
goto err;
}
gid_attr.index = i;
+ tprops->subnet_prefix =
+ be64_to_cpu(gid_attr.gid.global.subnet_prefix);
add_modify_gid(table, &gid_attr);
}
err:
@@ -1484,7 +1486,7 @@ ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
if (!rdma_protocol_roce(device, port) && update_gids) {
ret = config_non_roce_gid_cache(device, port,
- tprops->gid_tbl_len);
+ tprops);
if (ret)
goto err;
}
@@ -1619,8 +1621,6 @@ int ib_cache_setup_one(struct ib_device *device)
u32 p;
int err;
- rwlock_init(&device->cache_lock);
-
err = gid_table_setup_one(device);
if (err)
return err;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5d3b8b8d163d..c40791baced5 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3132,6 +3132,9 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
struct rdma_id_private *id_priv;
int ret;
+ if (!timeout_ms)
+ return -EINVAL;
+
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
return -EINVAL;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 647cca4e0240..f66f48d860ec 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -316,45 +316,13 @@ struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
void nldev_init(void);
void nldev_exit(void);
-static inline struct ib_qp *
-_ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *attr, struct ib_udata *udata,
- struct ib_uqp_object *uobj, const char *caller)
-{
- struct ib_qp *qp;
-
- if (!dev->ops.create_qp)
- return ERR_PTR(-EOPNOTSUPP);
-
- qp = dev->ops.create_qp(pd, attr, udata);
- if (IS_ERR(qp))
- return qp;
-
- qp->device = dev;
- qp->pd = pd;
- qp->uobject = uobj;
- qp->real_qp = qp;
-
- qp->qp_type = attr->qp_type;
- qp->rwq_ind_tbl = attr->rwq_ind_tbl;
- qp->send_cq = attr->send_cq;
- qp->recv_cq = attr->recv_cq;
- qp->srq = attr->srq;
- qp->rwq_ind_tbl = attr->rwq_ind_tbl;
- qp->event_handler = attr->event_handler;
- qp->port = attr->port_num;
-
- atomic_set(&qp->usecnt, 0);
- spin_lock_init(&qp->mr_lock);
- INIT_LIST_HEAD(&qp->rdma_mrs);
- INIT_LIST_HEAD(&qp->sig_mrs);
-
- rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
- WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
- rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
- rdma_restrack_add(&qp->res);
- return qp;
-}
+struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller);
+
+void ib_qp_usecnt_inc(struct ib_qp *qp);
+void ib_qp_usecnt_dec(struct ib_qp *qp);
struct rdma_dev_addr;
int rdma_resolve_ip_route(struct sockaddr *src_addr,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index fa20b1824fb8..f4814bb7f082 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -607,6 +607,8 @@ struct ib_device *_ib_alloc_device(size_t size)
for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
INIT_LIST_HEAD(&device->cq_pools[i]);
+ rwlock_init(&device->cache_lock);
+
device->uverbs_cmd_mask =
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
@@ -2050,7 +2052,6 @@ static int __ib_query_port(struct ib_device *device,
u32 port_num,
struct ib_port_attr *port_attr)
{
- union ib_gid gid = {};
int err;
memset(port_attr, 0, sizeof(*port_attr));
@@ -2063,11 +2064,8 @@ static int __ib_query_port(struct ib_device *device,
IB_LINK_LAYER_INFINIBAND)
return 0;
- err = device->ops.query_gid(device, port_num, 0, &gid);
- if (err)
- return err;
-
- port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+ ib_get_cached_subnet_prefix(device, port_num,
+ &port_attr->subnet_prefix);
return 0;
}
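With this change __ib_query_port() no longer issues a query_gid() device call just to learn the subnet prefix: it reads the cached value via ib_get_cached_subnet_prefix(), which the cache-update path shown above keeps current now that config_non_roce_gid_cache() copies the subnet prefix out of the queried GIDs into the port attributes.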
@@ -2656,6 +2654,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_hw_stats);
SET_DEVICE_OP(dev_ops, get_link_layer);
SET_DEVICE_OP(dev_ops, get_netdev);
+ SET_DEVICE_OP(dev_ops, get_numa_node);
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
@@ -2712,6 +2711,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_cq);
SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_qp);
SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 42261152b489..2b47073c61a6 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -1186,29 +1186,34 @@ static int __init iw_cm_init(void)
ret = iwpm_init(RDMA_NL_IWCM);
if (ret)
- pr_err("iw_cm: couldn't init iwpm\n");
- else
- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
+ return ret;
+
iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
if (!iwcm_wq)
- return -ENOMEM;
+ goto err_alloc;
iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
iwcm_ctl_table);
if (!iwcm_ctl_table_hdr) {
pr_err("iw_cm: couldn't register sysctl paths\n");
- destroy_workqueue(iwcm_wq);
- return -ENOMEM;
+ goto err_sysctl;
}
+ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
return 0;
+
+err_sysctl:
+ destroy_workqueue(iwcm_wq);
+err_alloc:
+ iwpm_exit(RDMA_NL_IWCM);
+ return -ENOMEM;
}
static void __exit iw_cm_cleanup(void)
{
+ rdma_nl_unregister(RDMA_NL_IWCM);
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
- rdma_nl_unregister(RDMA_NL_IWCM);
iwpm_exit(RDMA_NL_IWCM);
}
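The reordering in both functions follows the usual unwind discipline: iw_cm_init() now registers the netlink handlers only after the workqueue and sysctl table exist, so no callback can arrive into a half-initialised module, and iw_cm_cleanup() tears everything down in exactly the reverse order, starting with rdma_nl_unregister().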
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 12a9816fc0e2..3c9a9869212b 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -69,10 +69,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto pid_query_error;
- }
if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
@@ -153,10 +149,6 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto add_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
@@ -240,10 +232,6 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto query_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
@@ -331,10 +319,6 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
const char *err_str = "";
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- err_str = "Invalid port mapper client";
- goto remove_mapping_error;
- }
if (!iwpm_valid_pid())
return 0;
if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
@@ -444,8 +428,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
- if (iwpm_valid_client(nl_client))
- iwpm_set_registration(nl_client, IWPM_REG_VALID);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
@@ -649,11 +632,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
return ret;
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %u\n",
- __func__, nl_client);
- return ret;
- }
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
local_sockaddr = (struct sockaddr_storage *)
@@ -736,11 +714,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
return ret;
}
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %u\n",
- __func__, nl_client);
- return ret;
- }
iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
iwpm_user_pid = cb->nlh->nlmsg_pid;
@@ -863,11 +836,6 @@ int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
abi_version = nla_get_u16(nltb[IWPM_NLA_HELLO_ABI_VERSION]);
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %u\n",
- __func__, nl_client);
- return ret;
- }
iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
iwpm_ulib_version = min_t(u16, IWPM_UABI_VERSION, abi_version);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 3f8c019c7260..54f4feb604d8 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -48,7 +48,6 @@ static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
static struct hlist_head *iwpm_reminfo_bucket;
static DEFINE_SPINLOCK(iwpm_reminfo_lock);
-static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
/**
@@ -59,39 +58,21 @@ static struct iwpm_admin_data iwpm_admin;
*/
int iwpm_init(u8 nl_client)
{
- int ret = 0;
- mutex_lock(&iwpm_admin_lock);
- if (!refcount_read(&iwpm_admin.refcount)) {
- iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE,
- sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!iwpm_hash_bucket) {
- ret = -ENOMEM;
- goto init_exit;
- }
- iwpm_reminfo_bucket = kcalloc(IWPM_REMINFO_HASH_SIZE,
- sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!iwpm_reminfo_bucket) {
- kfree(iwpm_hash_bucket);
- ret = -ENOMEM;
- goto init_exit;
- }
+ iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE,
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_hash_bucket)
+ return -ENOMEM;
- refcount_set(&iwpm_admin.refcount, 1);
- } else {
- refcount_inc(&iwpm_admin.refcount);
+ iwpm_reminfo_bucket = kcalloc(IWPM_REMINFO_HASH_SIZE,
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_reminfo_bucket) {
+ kfree(iwpm_hash_bucket);
+ return -ENOMEM;
}
-init_exit:
- mutex_unlock(&iwpm_admin_lock);
- if (!ret) {
- iwpm_set_valid(nl_client, 1);
- iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
- pr_debug("%s: Mapinfo and reminfo tables are created\n",
- __func__);
- }
- return ret;
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
+ pr_debug("%s: Mapinfo and reminfo tables are created\n", __func__);
+ return 0;
}
static void free_hash_bucket(void);
@@ -105,22 +86,9 @@ static void free_reminfo_bucket(void);
*/
int iwpm_exit(u8 nl_client)
{
-
- if (!iwpm_valid_client(nl_client))
- return -EINVAL;
- mutex_lock(&iwpm_admin_lock);
- if (!refcount_read(&iwpm_admin.refcount)) {
- mutex_unlock(&iwpm_admin_lock);
- pr_err("%s Incorrect usage - negative refcount\n", __func__);
- return -EINVAL;
- }
- if (refcount_dec_and_test(&iwpm_admin.refcount)) {
- free_hash_bucket();
- free_reminfo_bucket();
- pr_debug("%s: Resources are destroyed\n", __func__);
- }
- mutex_unlock(&iwpm_admin_lock);
- iwpm_set_valid(nl_client, 0);
+ free_hash_bucket();
+ free_reminfo_bucket();
+ pr_debug("%s: Resources are destroyed\n", __func__);
iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
@@ -145,8 +113,6 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
unsigned long flags;
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client))
- return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
if (!map_info)
return -ENOMEM;
@@ -306,10 +272,6 @@ int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
unsigned long flags;
int ret = -EINVAL;
- if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid client = %u\n", __func__, nl_client);
- return ret;
- }
spin_lock_irqsave(&iwpm_reminfo_lock, flags);
if (iwpm_reminfo_bucket) {
hash_bucket_head = get_reminfo_hash_bucket(
@@ -424,16 +386,6 @@ int iwpm_get_nlmsg_seq(void)
return atomic_inc_return(&iwpm_admin.nlmsg_seq);
}
-int iwpm_valid_client(u8 nl_client)
-{
- return iwpm_admin.client_list[nl_client];
-}
-
-void iwpm_set_valid(u8 nl_client, int valid)
-{
- iwpm_admin.client_list[nl_client] = valid;
-}
-
/* valid client */
u32 iwpm_get_registration(u8 nl_client)
{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index e201835de733..3a42ad43056e 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -90,9 +90,7 @@ struct iwpm_remote_info {
};
struct iwpm_admin_data {
- refcount_t refcount;
atomic_t nlmsg_seq;
- int client_list[RDMA_NL_NUM_CLIENTS];
u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
@@ -148,22 +146,6 @@ int iwpm_get_nlmsg_seq(void);
void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
/**
- * iwpm_valid_client - Check if the port mapper client is valid
- * @nl_client: The index of the netlink client
- *
- * Valid clients need to call iwpm_init() before using
- * the port mapper
- */
-int iwpm_valid_client(u8 nl_client);
-
-/**
- * iwpm_set_valid - Set the port mapper client to valid or not
- * @nl_client: The index of the netlink client
- * @valid: 1 if valid or 0 if invalid
- */
-void iwpm_set_valid(u8 nl_client, int valid);
-
-/**
* iwpm_check_registration - Check if the client registration
* matches the given one
* @nl_client: The index of the netlink client
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 033207882c82..1f935d9f6178 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -343,7 +343,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
rt = &dev->res[res->type];
old = xa_erase(&rt->xa, res->id);
- if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP)
+ if (res->type == RDMA_RESTRACK_MR)
return;
WARN_ON(old != res);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index b61576f702b8..a20b8108e160 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -123,12 +123,6 @@ struct ib_sa_query {
#define IB_SA_CANCEL 0x00000002
#define IB_SA_QUERY_OPA 0x00000004
-struct ib_sa_service_query {
- void (*callback)(int, struct ib_sa_service_rec *, void *);
- void *context;
- struct ib_sa_query sa_query;
-};
-
struct ib_sa_path_query {
void (*callback)(int, struct sa_path_rec *, void *);
void *context;
@@ -502,54 +496,6 @@ static const struct ib_field mcmember_rec_table[] = {
.size_bits = 23 },
};
-#define SERVICE_REC_FIELD(field) \
- .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
- .struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field), \
- .field_name = "sa_service_rec:" #field
-
-static const struct ib_field service_rec_table[] = {
- { SERVICE_REC_FIELD(id),
- .offset_words = 0,
- .offset_bits = 0,
- .size_bits = 64 },
- { SERVICE_REC_FIELD(gid),
- .offset_words = 2,
- .offset_bits = 0,
- .size_bits = 128 },
- { SERVICE_REC_FIELD(pkey),
- .offset_words = 6,
- .offset_bits = 0,
- .size_bits = 16 },
- { SERVICE_REC_FIELD(lease),
- .offset_words = 7,
- .offset_bits = 0,
- .size_bits = 32 },
- { SERVICE_REC_FIELD(key),
- .offset_words = 8,
- .offset_bits = 0,
- .size_bits = 128 },
- { SERVICE_REC_FIELD(name),
- .offset_words = 12,
- .offset_bits = 0,
- .size_bits = 64*8 },
- { SERVICE_REC_FIELD(data8),
- .offset_words = 28,
- .offset_bits = 0,
- .size_bits = 16*8 },
- { SERVICE_REC_FIELD(data16),
- .offset_words = 32,
- .offset_bits = 0,
- .size_bits = 8*16 },
- { SERVICE_REC_FIELD(data32),
- .offset_words = 36,
- .offset_bits = 0,
- .size_bits = 4*32 },
- { SERVICE_REC_FIELD(data64),
- .offset_words = 40,
- .offset_bits = 0,
- .size_bits = 2*64 },
-};
-
#define CLASSPORTINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
.struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \
@@ -1358,6 +1304,7 @@ static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
{
unsigned long flags;
int ret, id;
+ const int nmbr_sa_query_retries = 10;
xa_lock_irqsave(&queries, flags);
ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
@@ -1365,7 +1312,13 @@ static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
if (ret < 0)
return ret;
- query->mad_buf->timeout_ms = timeout_ms;
+ query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
+ query->mad_buf->retries = nmbr_sa_query_retries;
+ if (!query->mad_buf->timeout_ms) {
+ /* Special case, very small timeout_ms */
+ query->mad_buf->timeout_ms = 1;
+ query->mad_buf->retries = timeout_ms;
+ }
query->mad_buf->context[0] = query;
query->id = id;
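Note: the hunk above trades one long MAD wait for nmbr_sa_query_retries shorter attempts while keeping the caller's overall budget roughly intact. Worked through with the constants from the patch:

	/* normal case: timeout_ms = 2000 */
	per_try = 2000 / 10;	/* 200 ms per attempt          */
	retries = 10;		/* ~2 s worst case, as before  */

	/* small-timeout case: timeout_ms = 5, so 5 / 10 == 0 */
	per_try = 1;		/* 1 ms floor per attempt      */
	retries = 5;		/* original budget, ~5 ms total */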
@@ -1634,129 +1587,6 @@ err1:
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
-static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
-{
- struct ib_sa_service_query *query =
- container_of(sa_query, struct ib_sa_service_query, sa_query);
-
- if (mad) {
- struct ib_sa_service_rec rec;
-
- ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
-}
-
-/**
- * ib_sa_service_rec_query - Start Service Record operation
- * @client:SA client
- * @device:device to send request on
- * @port_num: port number to send request on
- * @method:SA method - should be get, set, or delete
- * @rec:Service Record to send in request
- * @comp_mask:component mask to send in request
- * @timeout_ms:time to wait for response
- * @gfp_mask:GFP mask to use for internal allocations
- * @callback:function called when request completes, times out or is
- * canceled
- * @context:opaque user context passed to callback
- * @sa_query:request context, used to cancel request
- *
- * Send a Service Record set/get/delete to the SA to register,
- * unregister or query a service record.
- * The callback function will be called when the request completes (or
- * fails); status is 0 for a successful response, -EINTR if the query
- * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
- * occurred sending the query. The resp parameter of the callback is
- * only valid if status is 0.
- *
- * If the return value of ib_sa_service_rec_query() is negative, it is an
- * error code. Otherwise it is a request ID that can be used to cancel
- * the query.
- */
-int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u32 port_num, u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask,
- unsigned long timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query)
-{
- struct ib_sa_service_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
- int ret;
-
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
- if (method != IB_MGMT_METHOD_GET &&
- method != IB_MGMT_METHOD_SET &&
- method != IB_SA_METHOD_DELETE)
- return -EINVAL;
-
- query = kzalloc(sizeof(*query), gfp_mask);
- if (!query)
- return -ENOMEM;
-
- query->sa_query.port = port;
- ret = alloc_mad(&query->sa_query, gfp_mask);
- if (ret)
- goto err1;
-
- ib_sa_client_get(client);
- query->sa_query.client = client;
- query->callback = callback;
- query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(&query->sa_query, agent);
-
- query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
- query->sa_query.release = ib_sa_service_rec_release;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
- rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
- if (ret < 0)
- goto err2;
-
- return ret;
-
-err2:
- *sa_query = NULL;
- ib_sa_client_put(query->sa_query.client);
- free_mad(&query->sa_query);
-
-err1:
- kfree(query);
- return ret;
-}
-EXPORT_SYMBOL(ib_sa_service_rec_query);
-
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 0eb40025075f..86d479772fbc 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -51,15 +51,15 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
struct scatterlist *sg;
unsigned int i;
- if (umem->nmap > 0)
- ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ if (dirty)
+ ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
+ DMA_BIDIRECTIONAL, 0);
- for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
+ for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
unpin_user_page_range_dirty_lock(sg_page(sg),
DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
- sg_free_table(&umem->sg_head);
+ sg_free_append_table(&umem->sgt_append);
}
/**
@@ -111,7 +111,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
/* offset into first SGL */
pgoff = umem->address & ~PAGE_MASK;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/* Walk SGL and reduce max page size if VA/PA bits differ
* for any address.
*/
@@ -121,7 +121,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
* the maximum possible page size as the low bits of the iova
* must be zero when starting the next chunk.
*/
- if (i != (umem->nmap - 1))
+ if (i != (umem->sgt_append.sgt.nents - 1))
mask |= va;
pgoff = 0;
}
@@ -155,8 +155,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
unsigned long dma_attr = 0;
struct mm_struct *mm;
unsigned long npages;
- int ret;
- struct scatterlist *sg = NULL;
+ int pinned, ret;
unsigned int gup_flags = FOLL_WRITE;
/*
@@ -216,24 +215,24 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
while (npages) {
cond_resched();
- ret = pin_user_pages_fast(cur_base,
+ pinned = pin_user_pages_fast(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE /
sizeof(struct page *)),
gup_flags | FOLL_LONGTERM, page_list);
- if (ret < 0)
+ if (pinned < 0) {
+ ret = pinned;
goto umem_release;
+ }
- cur_base += ret * PAGE_SIZE;
- npages -= ret;
- sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
- 0, ret << PAGE_SHIFT,
- ib_dma_max_seg_size(device), sg, npages,
- GFP_KERNEL);
- umem->sg_nents = umem->sg_head.nents;
- if (IS_ERR(sg)) {
- unpin_user_pages_dirty_lock(page_list, ret, 0);
- ret = PTR_ERR(sg);
+ cur_base += pinned * PAGE_SIZE;
+ npages -= pinned;
+ ret = sg_alloc_append_table_from_pages(
+ &umem->sgt_append, page_list, pinned, 0,
+ pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
+ npages, GFP_KERNEL);
+ if (ret) {
+ unpin_user_pages_dirty_lock(page_list, pinned, 0);
goto umem_release;
}
}
@@ -241,16 +240,10 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
if (access & IB_ACCESS_RELAXED_ORDERING)
dma_attr |= DMA_ATTR_WEAK_ORDERING;
- umem->nmap =
- ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
- DMA_BIDIRECTIONAL, dma_attr);
-
- if (!umem->nmap) {
- ret = -ENOMEM;
+ ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
+ DMA_BIDIRECTIONAL, dma_attr);
+ if (ret)
goto umem_release;
- }
-
- ret = 0;
goto out;
umem_release:
@@ -310,7 +303,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.orig_nents, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
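Note: the umem conversion replaces the open-coded __sg_alloc_table_from_pages() loop and nmap bookkeeping with the append-table API. A condensed sketch of the lifecycle it relies on, assuming dev, page_list and npages are in scope and all pages arrive in one batch:

	struct sg_append_table sgt_append = {};
	int ret;

	ret = sg_alloc_append_table_from_pages(&sgt_append, page_list, npages,
					       0, (unsigned long)npages << PAGE_SHIFT,
					       ib_dma_max_seg_size(dev),
					       0 /* left_pages */, GFP_KERNEL);
	if (ret)
		return ret;

	ret = ib_dma_map_sgtable_attrs(dev, &sgt_append.sgt,
				       DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_append_table(&sgt_append);
		return ret;
	}

	/* DMA addresses are now walked with for_each_sgtable_dma_sg() */

	ib_dma_unmap_sgtable_attrs(dev, &sgt_append.sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_append_table(&sgt_append);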
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index c6e875619fac..e824baf4640d 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -55,9 +55,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
cur += sg_dma_len(sg);
}
- umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
- umem_dmabuf->umem.sg_head.nents = nmap;
- umem_dmabuf->umem.nmap = nmap;
+ umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
+ umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
umem_dmabuf->sgt = sgt;
wait_fence:
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 9462dbe66014..7a47343d11f9 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -292,9 +292,6 @@ EXPORT_SYMBOL(ib_umem_odp_release);
* @dma_index: index in the umem to add the dma to.
* @page: the page struct to map and add.
* @access_mask: access permissions needed for this page.
- * @current_seq: sequence number for synchronization with invalidations.
- * the sequence number is taken from
- * umem_odp->notifiers_seq.
*
* The function returns -EFAULT if the DMA mapping operation fails.
*
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8c8ca7bce3ca..740e6b2efe0e 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1435,35 +1435,13 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
attr.source_qpn = cmd->source_qpn;
}
- if (cmd->qp_type == IB_QPT_XRC_TGT)
- qp = ib_create_qp(pd, &attr);
- else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
- NULL);
-
+ qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
+ KBUILD_MODNAME);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto err_put;
}
-
- if (cmd->qp_type != IB_QPT_XRC_TGT) {
- ret = ib_create_qp_security(qp, device);
- if (ret)
- goto err_cb;
-
- atomic_inc(&pd->usecnt);
- if (attr.send_cq)
- atomic_inc(&attr.send_cq->usecnt);
- if (attr.recv_cq)
- atomic_inc(&attr.recv_cq->usecnt);
- if (attr.srq)
- atomic_inc(&attr.srq->usecnt);
- if (ind_tbl)
- atomic_inc(&ind_tbl->usecnt);
- } else {
- /* It is done in _ib_create_qp for other QP types */
- qp->uobject = obj;
- }
+ ib_qp_usecnt_inc(qp);
obj->uevent.uobject.object = qp;
obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
@@ -1502,9 +1480,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
resp.response_length = uverbs_response_length(attrs, sizeof(resp));
return uverbs_response(attrs, &resp, sizeof(resp));
-err_cb:
- ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
-
err_put:
if (!IS_ERR(xrcd_uobj))
uobj_put_read(xrcd_uobj);
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
index c00cfb5ed387..dd1075466f61 100644
--- a/drivers/infiniband/core/uverbs_std_types_qp.c
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -248,44 +248,23 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
set_caps(&attr, &cap, true);
mutex_init(&obj->mcast_lock);
- if (attr.qp_type == IB_QPT_XRC_TGT)
- qp = ib_create_qp(pd, &attr);
- else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
- NULL);
-
+ qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
+ KBUILD_MODNAME);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto err_put;
}
+ ib_qp_usecnt_inc(qp);
- if (attr.qp_type != IB_QPT_XRC_TGT) {
- atomic_inc(&pd->usecnt);
- if (attr.send_cq)
- atomic_inc(&attr.send_cq->usecnt);
- if (attr.recv_cq)
- atomic_inc(&attr.recv_cq->usecnt);
- if (attr.srq)
- atomic_inc(&attr.srq->usecnt);
- if (attr.rwq_ind_tbl)
- atomic_inc(&attr.rwq_ind_tbl->usecnt);
- } else {
+ if (attr.qp_type == IB_QPT_XRC_TGT) {
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
uobject);
atomic_inc(&obj->uxrcd->refcnt);
- /* It is done in _ib_create_qp for other QP types */
- qp->uobject = obj;
}
obj->uevent.uobject.object = qp;
uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);
- if (attr.qp_type != IB_QPT_XRC_TGT) {
- ret = ib_create_qp_security(qp, device);
- if (ret)
- return ret;
- }
-
set_caps(&attr, &cap, false);
ret = uverbs_copy_to_struct_or_zero(attrs,
UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7036967e4c0b..89a2b21976d6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1035,7 +1035,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
}
if (srq->srq_type == IB_SRQT_XRC) {
srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
- atomic_inc(&srq->ext.xrc.xrcd->usecnt);
+ if (srq->ext.xrc.xrcd)
+ atomic_inc(&srq->ext.xrc.xrcd->usecnt);
}
atomic_inc(&pd->usecnt);
@@ -1046,7 +1047,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
if (ret) {
rdma_restrack_put(&srq->res);
atomic_dec(&srq->pd->usecnt);
- if (srq->srq_type == IB_SRQT_XRC)
+ if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt);
@@ -1090,7 +1091,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
return ret;
atomic_dec(&srq->pd->usecnt);
- if (srq->srq_type == IB_SRQT_XRC)
+ if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt);
@@ -1199,34 +1200,142 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
return qp;
}
+static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
+{
+ struct ib_udata dummy = {};
+ struct ib_qp *qp;
+ int ret;
+
+ if (!dev->ops.create_qp)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->device = dev;
+ qp->pd = pd;
+ qp->uobject = uobj;
+ qp->real_qp = qp;
+
+ qp->qp_type = attr->qp_type;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->srq = attr->srq;
+ qp->event_handler = attr->event_handler;
+ qp->port = attr->port_num;
+ qp->qp_context = attr->qp_context;
+
+ spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
+
+ rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
+ WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
+ rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
+ ret = dev->ops.create_qp(qp, attr, udata);
+ if (ret)
+ goto err_create;
+
+ /*
+ * TODO: The mlx4 driver internally overwrites send_cq and recv_cq.
+ * Unfortunately, it is not an easy task to fix that driver.
+ */
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+
+ ret = ib_create_qp_security(qp, dev);
+ if (ret)
+ goto err_security;
+
+ rdma_restrack_add(&qp->res);
+ return qp;
+
+err_security:
+ qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
+err_create:
+ rdma_restrack_put(&qp->res);
+ kfree(qp);
+ return ERR_PTR(ret);
+}
+
/**
- * ib_create_named_qp - Creates a kernel QP associated with the specified protection
+ * ib_create_qp_user - Creates a QP associated with the specified protection
* domain.
+ * @dev: IB device
* @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
+ * @attr: A list of initial attributes required to create the
* QP. If QP creation succeeds, then the attributes are updated to
* the actual capabilities of the created QP.
+ * @udata: User data
+ * @uobj: uverbs object
* @caller: caller's build-time module name
- *
- * NOTE: for user qp use ib_create_qp_user with valid udata!
*/
-struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- const char *caller)
+struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
{
- struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
- struct ib_qp *qp;
- int ret;
+ struct ib_qp *qp, *xrc_qp;
- if (qp_init_attr->rwq_ind_tbl &&
- (qp_init_attr->recv_cq ||
- qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
- qp_init_attr->cap.max_recv_sge))
- return ERR_PTR(-EINVAL);
+ if (attr->qp_type == IB_QPT_XRC_TGT)
+ qp = create_qp(dev, pd, attr, NULL, NULL, caller);
+ else
+ qp = create_qp(dev, pd, attr, udata, uobj, NULL);
+ if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
+ return qp;
- if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
- !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
- return ERR_PTR(-EINVAL);
+ xrc_qp = create_xrc_qp_user(qp, attr);
+ if (IS_ERR(xrc_qp)) {
+ ib_destroy_qp(qp);
+ return xrc_qp;
+ }
+
+ xrc_qp->uobject = uobj;
+ return xrc_qp;
+}
+EXPORT_SYMBOL(ib_create_qp_user);
+
+void ib_qp_usecnt_inc(struct ib_qp *qp)
+{
+ if (qp->pd)
+ atomic_inc(&qp->pd->usecnt);
+ if (qp->send_cq)
+ atomic_inc(&qp->send_cq->usecnt);
+ if (qp->recv_cq)
+ atomic_inc(&qp->recv_cq->usecnt);
+ if (qp->srq)
+ atomic_inc(&qp->srq->usecnt);
+ if (qp->rwq_ind_tbl)
+ atomic_inc(&qp->rwq_ind_tbl->usecnt);
+}
+EXPORT_SYMBOL(ib_qp_usecnt_inc);
+
+void ib_qp_usecnt_dec(struct ib_qp *qp)
+{
+ if (qp->rwq_ind_tbl)
+ atomic_dec(&qp->rwq_ind_tbl->usecnt);
+ if (qp->srq)
+ atomic_dec(&qp->srq->usecnt);
+ if (qp->recv_cq)
+ atomic_dec(&qp->recv_cq->usecnt);
+ if (qp->send_cq)
+ atomic_dec(&qp->send_cq->usecnt);
+ if (qp->pd)
+ atomic_dec(&qp->pd->usecnt);
+}
+EXPORT_SYMBOL(ib_qp_usecnt_dec);
+
+struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller)
+{
+ struct ib_device *device = pd->device;
+ struct ib_qp *qp;
+ int ret;
/*
* If the callers is using the RDMA API calculate the resources
@@ -1237,47 +1346,11 @@ struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
+ qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
if (IS_ERR(qp))
return qp;
- ret = ib_create_qp_security(qp, device);
- if (ret)
- goto err;
-
- if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
- struct ib_qp *xrc_qp =
- create_xrc_qp_user(qp, qp_init_attr);
-
- if (IS_ERR(xrc_qp)) {
- ret = PTR_ERR(xrc_qp);
- goto err;
- }
- return xrc_qp;
- }
-
- qp->event_handler = qp_init_attr->event_handler;
- qp->qp_context = qp_init_attr->qp_context;
- if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
- qp->recv_cq = NULL;
- qp->srq = NULL;
- } else {
- qp->recv_cq = qp_init_attr->recv_cq;
- if (qp_init_attr->recv_cq)
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
- qp->srq = qp_init_attr->srq;
- if (qp->srq)
- atomic_inc(&qp_init_attr->srq->usecnt);
- }
-
- qp->send_cq = qp_init_attr->send_cq;
- qp->xrcd = NULL;
-
- atomic_inc(&pd->usecnt);
- if (qp_init_attr->send_cq)
- atomic_inc(&qp_init_attr->send_cq->usecnt);
- if (qp_init_attr->rwq_ind_tbl)
- atomic_inc(&qp->rwq_ind_tbl->usecnt);
+ ib_qp_usecnt_inc(qp);
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
@@ -1303,7 +1376,7 @@ err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_named_qp);
+EXPORT_SYMBOL(ib_create_qp_kernel);
static const struct {
int valid;
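Note: with the reference bumps factored out of QP creation, every consumer now pairs the two helpers around the QP's lifetime; condensed from the uverbs hunks earlier in this series:

	qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
			       KBUILD_MODNAME);
	if (IS_ERR(qp))
		return PTR_ERR(qp);
	ib_qp_usecnt_inc(qp);	/* PD, send/recv CQs, SRQ, ind_tbl */

	/* teardown: ib_destroy_qp_user() drops the same references via
	 * ib_qp_usecnt_dec() once the driver's destroy_qp() succeeds,
	 * then kfree()s the core-owned allocation. */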
@@ -1935,10 +2008,6 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{
const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
- struct ib_pd *pd;
- struct ib_cq *scq, *rcq;
- struct ib_srq *srq;
- struct ib_rwq_ind_table *ind_tbl;
struct ib_qp_security *sec;
int ret;
@@ -1950,11 +2019,6 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
if (qp->real_qp != qp)
return __ib_destroy_shared_qp(qp);
- pd = qp->pd;
- scq = qp->send_cq;
- rcq = qp->recv_cq;
- srq = qp->srq;
- ind_tbl = qp->rwq_ind_tbl;
sec = qp->qp_sec;
if (sec)
ib_destroy_qp_security_begin(sec);
@@ -1963,30 +2027,24 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
rdma_rw_cleanup_mrs(qp);
rdma_counter_unbind_qp(qp, true);
- rdma_restrack_del(&qp->res);
ret = qp->device->ops.destroy_qp(qp, udata);
- if (!ret) {
- if (alt_path_sgid_attr)
- rdma_put_gid_attr(alt_path_sgid_attr);
- if (av_sgid_attr)
- rdma_put_gid_attr(av_sgid_attr);
- if (pd)
- atomic_dec(&pd->usecnt);
- if (scq)
- atomic_dec(&scq->usecnt);
- if (rcq)
- atomic_dec(&rcq->usecnt);
- if (srq)
- atomic_dec(&srq->usecnt);
- if (ind_tbl)
- atomic_dec(&ind_tbl->usecnt);
- if (sec)
- ib_destroy_qp_security_end(sec);
- } else {
+ if (ret) {
if (sec)
ib_destroy_qp_security_abort(sec);
+ return ret;
}
+ if (alt_path_sgid_attr)
+ rdma_put_gid_attr(alt_path_sgid_attr);
+ if (av_sgid_attr)
+ rdma_put_gid_attr(av_sgid_attr);
+
+ ib_qp_usecnt_dec(qp);
+ if (sec)
+ ib_destroy_qp_security_end(sec);
+
+ rdma_restrack_del(&qp->res);
+ kfree(qp);
return ret;
}
EXPORT_SYMBOL(ib_destroy_qp_user);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ea0054c60fbc..408dfbcc47b5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -815,7 +815,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
rc = bnxt_re_destroy_gsi_sqp(qp);
if (rc)
- goto sh_fail;
+ return rc;
}
mutex_lock(&rdev->qp_lock);
@@ -826,10 +826,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
- kfree(qp);
return 0;
-sh_fail:
- return rc;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -1312,7 +1309,7 @@ out:
static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
struct bnxt_re_pd *pd)
{
- struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_sqp_entries *sqp_tbl;
struct bnxt_re_dev *rdev;
struct bnxt_re_qp *sqp;
struct bnxt_re_ah *sah;
@@ -1320,7 +1317,7 @@ static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
rdev = qp->rdev;
/* Create a shadow QP to handle the QP1 traffic */
- sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
GFP_KERNEL);
if (!sqp_tbl)
return -ENOMEM;
@@ -1402,27 +1399,22 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
return rc;
}
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
{
+ struct ib_pd *ib_pd = ib_qp->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp;
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
int rc;
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
if (!rc) {
rc = -EINVAL;
- goto exit;
+ goto fail;
}
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- rc = -ENOMEM;
- goto exit;
- }
qp->rdev = rdev;
rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
if (rc)
@@ -1465,16 +1457,14 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
mutex_unlock(&rdev->qp_lock);
atomic_inc(&rdev->qp_count);
- return &qp->ib_qp;
+ return 0;
qp_destroy:
bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
fail:
- kfree(qp);
-exit:
- return ERR_PTR(rc);
+ return rc;
}
static u8 __from_ib_qp_state(enum ib_qp_state state)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index d68671cc6173..b5c6e0f4f877 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -78,9 +78,9 @@ struct bnxt_re_srq {
};
struct bnxt_re_qp {
+ struct ib_qp ib_qp;
struct list_head list;
struct bnxt_re_dev *rdev;
- struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp;
@@ -179,9 +179,8 @@ int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
+int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 4678bd6ec7d6..66268e41b470 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -711,6 +711,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
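Note: bnxt_re is converted here to the core-allocated QP pattern: the driver declares its object size, the core allocates the container with struct ib_qp at offset zero (hence the member reorder in ib_verbs.h; INIT_RDMA_OBJ_SIZE build-checks that offset), and create/destroy only initialize and tear down. A generic sketch with illustrative foo names:

	struct foo_qp {
		struct ib_qp ibqp;	/* must stay first */
		/* driver-private state follows */
	};

	static const struct ib_device_ops foo_dev_ops = {
		INIT_RDMA_OBJ_SIZE(ib_qp, foo_qp, ibqp),
	};

	static int foo_create_qp(struct ib_qp *ibqp,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
	{
		struct foo_qp *qp = container_of(ibqp, struct foo_qp, ibqp);

		/* program the hardware QP; on error just return an errno,
		 * the core frees the allocation (no kzalloc/kfree here) */
		return 0;
	}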
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index ac5f581aff4c..12f33467c672 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -990,9 +990,8 @@ int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata);
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 881d515eb15a..e7337662aff8 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -499,6 +499,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a81fa7a56edb..d20b4ef2c853 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2103,16 +2103,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
c4iw_put_wr_wait(qhp->wr_waitp);
-
- kfree(qhp);
return 0;
}
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
+ struct ib_pd *pd = qp->pd;
struct c4iw_dev *rhp;
- struct c4iw_qp *qhp;
+ struct c4iw_qp *qhp = to_c4iw_qp(qp);
struct c4iw_pd *php;
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
@@ -2124,44 +2123,36 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
- pr_debug("ib_pd %p\n", pd);
-
if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
php = to_c4iw_pd(pd);
rhp = php->rhp;
schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!attrs->srq) {
if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
rqsize = attrs->cap.max_recv_wr + 1;
if (rqsize < 8)
rqsize = 8;
}
if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
sqsize = attrs->cap.max_send_wr + 1;
if (sqsize < 8)
sqsize = 8;
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
-
qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
- if (!qhp->wr_waitp) {
- ret = -ENOMEM;
- goto err_free_qhp;
- }
+ if (!qhp->wr_waitp)
+ return -ENOMEM;
qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize =
@@ -2339,7 +2330,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
- return &qhp->ibqp;
+ return 0;
err_free_ma_sync_key:
kfree(ma_sync_key_mm);
err_free_rq_db_key:
@@ -2359,9 +2350,7 @@ err_destroy_qp:
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
c4iw_put_wr_wait(qhp->wr_waitp);
-err_free_qhp:
- kfree(qhp);
- return ERR_PTR(ret);
+ return ret;
}
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 2b8ca099b381..87b1dadeb7fe 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -27,8 +27,7 @@
struct efa_irq {
irq_handler_t handler;
void *data;
- int cpu;
- u32 vector;
+ u32 irqn;
cpumask_t affinity_hint_mask;
char name[EFA_IRQNAME_SIZE];
};
@@ -132,9 +131,8 @@ int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index be4a07bd268a..417dea5f90cf 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -83,8 +83,7 @@ static int efa_request_mgmnt_irq(struct efa_dev *dev)
int err;
irq = &dev->admin_irq;
- err = request_irq(irq->vector, irq->handler, 0, irq->name,
- irq->data);
+ err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
if (err) {
dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
err);
@@ -92,8 +91,8 @@ static int efa_request_mgmnt_irq(struct efa_dev *dev)
}
dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
- nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector);
- irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+ nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn);
+ irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
return 0;
}
@@ -106,15 +105,13 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev)
"efa-mgmnt@pci:%s", pci_name(dev->pdev));
dev->admin_irq.handler = efa_intr_msix_mgmnt;
dev->admin_irq.data = dev;
- dev->admin_irq.vector =
+ dev->admin_irq.irqn =
pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
cpu = cpumask_first(cpu_online_mask);
- dev->admin_irq.cpu = cpu;
cpumask_set_cpu(cpu,
&dev->admin_irq.affinity_hint_mask);
- dev_info(&dev->pdev->dev, "Setup irq:0x%p vector:%d name:%s\n",
- &dev->admin_irq,
- dev->admin_irq.vector,
+ dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
+ dev->admin_irq.irqn,
dev->admin_irq.name);
}
@@ -123,8 +120,8 @@ static void efa_free_mgmnt_irq(struct efa_dev *dev)
struct efa_irq *irq;
irq = &dev->admin_irq;
- irq_set_affinity_hint(irq->vector, NULL);
- free_irq(irq->vector, irq->data);
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_irq(irq->irqn, irq->data);
}
static int efa_set_mgmnt_irq(struct efa_dev *dev)
@@ -271,6 +268,7 @@ static const struct ib_device_ops efa_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index be6d3ff0f1be..e5f9d90aad5e 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -30,7 +30,21 @@ struct efa_user_mmap_entry {
u8 mmap_flag;
};
-#define EFA_DEFINE_STATS(op) \
+#define EFA_DEFINE_DEVICE_STATS(op) \
+ op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
+ op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_CMDS_ERR, "cmds_err") \
+ op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
+ op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
+ op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
+ op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_CREATE_CQ_ERR, "create_cq_err") \
+ op(EFA_REG_MR_ERR, "reg_mr_err") \
+ op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
+ op(EFA_CREATE_AH_ERR, "create_ah_err") \
+ op(EFA_MMAP_ERR, "mmap_err")
+
+#define EFA_DEFINE_PORT_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
op(EFA_RX_BYTES, "rx_bytes") \
@@ -44,28 +58,24 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
- op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
- op(EFA_COMPLETED_CMDS, "completed_cmds") \
- op(EFA_CMDS_ERR, "cmds_err") \
- op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
- op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
- op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
- op(EFA_CREATE_QP_ERR, "create_qp_err") \
- op(EFA_CREATE_CQ_ERR, "create_cq_err") \
- op(EFA_REG_MR_ERR, "reg_mr_err") \
- op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
- op(EFA_CREATE_AH_ERR, "create_ah_err") \
- op(EFA_MMAP_ERR, "mmap_err")
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,
-enum efa_hw_stats {
- EFA_DEFINE_STATS(EFA_STATS_ENUM)
+enum efa_hw_device_stats {
+ EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};
-static const char *const efa_stats_names[] = {
- EFA_DEFINE_STATS(EFA_STATS_STR)
+static const char *const efa_device_stats_names[] = {
+ EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
+};
+
+enum efa_hw_port_stats {
+ EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
+};
+
+static const char *const efa_port_stats_names[] = {
+ EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
};
#define EFA_CHUNK_PAYLOAD_SHIFT 12
@@ -440,7 +450,6 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qp->rq_size, DMA_TO_DEVICE);
}
- kfree(qp);
return 0;
}
@@ -599,17 +608,16 @@ static int efa_qp_validate_attr(struct efa_dev *dev,
return 0;
}
-struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct efa_com_create_qp_params create_qp_params = {};
struct efa_com_create_qp_result create_qp_resp;
- struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_dev *dev = to_edev(ibqp->device);
struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {};
+ struct efa_qp *qp = to_eqp(ibqp);
struct efa_ucontext *ucontext;
- struct efa_qp *qp;
int err;
ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@@ -654,14 +662,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
goto err_out;
}
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- err = -ENOMEM;
- goto err_out;
- }
-
create_qp_params.uarn = ucontext->uarn;
- create_qp_params.pd = to_epd(ibpd)->pdn;
+ create_qp_params.pd = to_epd(ibqp->pd)->pdn;
if (init_attr->qp_type == IB_QPT_UD) {
create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
@@ -672,7 +674,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
"Unsupported qp type %d driver qp type %d\n",
init_attr->qp_type, cmd.driver_qp_type);
err = -EOPNOTSUPP;
- goto err_free_qp;
+ goto err_out;
}
ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
@@ -690,7 +692,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
qp->rq_size, DMA_TO_DEVICE);
if (!qp->rq_cpu_addr) {
err = -ENOMEM;
- goto err_free_qp;
+ goto err_out;
}
ibdev_dbg(&dev->ibdev,
@@ -717,7 +719,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
qp->qp_handle = create_qp_resp.qp_handle;
qp->ibqp.qp_num = create_qp_resp.qp_num;
- qp->ibqp.qp_type = init_attr->qp_type;
qp->max_send_wr = init_attr->cap.max_send_wr;
qp->max_recv_wr = init_attr->cap.max_recv_wr;
qp->max_send_sge = init_attr->cap.max_send_sge;
@@ -737,7 +738,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
- return &qp->ibqp;
+ return 0;
err_remove_mmap_entries:
efa_qp_user_mmap_entries_remove(qp);
@@ -747,11 +748,9 @@ err_free_mapped:
if (qp->rq_size)
efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
qp->rq_size, DMA_TO_DEVICE);
-err_free_qp:
- kfree(qp);
err_out:
atomic64_inc(&dev->stats.create_qp_err);
- return ERR_PTR(err);
+ return err;
}
static const struct {
@@ -1904,33 +1903,53 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
return 0;
}
-struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
{
- return rdma_alloc_hw_stats_struct(efa_stats_names,
- ARRAY_SIZE(efa_stats_names),
+ return rdma_alloc_hw_stats_struct(efa_port_stats_names,
+ ARRAY_SIZE(efa_port_stats_names),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
- /*
- * It is probably a bug that efa reports its port stats as device
- * stats
- */
- return efa_alloc_hw_port_stats(ibdev, 0);
+ return rdma_alloc_hw_stats_struct(efa_device_stats_names,
+ ARRAY_SIZE(efa_device_stats_names),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int efa_fill_device_stats(struct efa_dev *dev,
+ struct rdma_hw_stats *stats)
+{
+ struct efa_com_stats_admin *as = &dev->edev.aq.stats;
+ struct efa_stats *s = &dev->stats;
+
+ stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
+ stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
+ stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+
+ stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
+ stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
+ stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
+ stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
+ stats->value[EFA_ALLOC_UCONTEXT_ERR] =
+ atomic64_read(&s->alloc_ucontext_err);
+ stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
+
+ return ARRAY_SIZE(efa_device_stats_names);
+}
+
+static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
+ u32 port_num)
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
- struct efa_dev *dev = to_edev(ibdev);
struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms;
struct efa_com_basic_stats *bs;
- struct efa_com_stats_admin *as;
- struct efa_stats *s;
int err;
params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
@@ -1969,24 +1988,16 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
- as = &dev->edev.aq.stats;
- stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
- stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
- stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
- stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
-
- s = &dev->stats;
- stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
- stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
- stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
- stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
- stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
- stats->value[EFA_ALLOC_UCONTEXT_ERR] =
- atomic64_read(&s->alloc_ucontext_err);
- stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
- stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
+ return ARRAY_SIZE(efa_port_stats_names);
+}
- return ARRAY_SIZE(efa_stats_names);
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (port_num)
+ return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
+ else
+ return efa_fill_device_stats(to_edev(ibdev), stats);
}
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
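Note: the stats split leans on the op() x-macro: each EFA_DEFINE_*_STATS list expands twice, once through EFA_STATS_ENUM into an enum and once through EFA_STATS_STR into a name table, so indices and strings stay in sync by construction. Roughly what the device-stats expansion yields:

	enum efa_hw_device_stats {
		EFA_SUBMITTED_CMDS,
		EFA_COMPLETED_CMDS,
		/* ... */
	};

	static const char *const efa_device_stats_names[] = {
		[EFA_SUBMITTED_CMDS] = "submitted_cmds",
		[EFA_COMPLETED_CMDS] = "completed_cmds",
		/* ... */
	};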
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 16543f717527..98c813ba4304 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,49 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index f94ed5d7c7a3..00854f21787f 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _HFI1_AFFINITY_H
#define _HFI1_AFFINITY_H
diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 75d5d18da3da..df295f47b315 100644
--- a/drivers/infiniband/hw/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015-2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _ASPM_H
#define _ASPM_H
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index c97544638367..37273dc0c03c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/*
@@ -14414,7 +14372,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
goto bail;
- /* add qos entries to the the RSM map table */
+ /* add qos entries to the RSM map table */
for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
unsigned tctxt;
@@ -14893,7 +14851,7 @@ int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
}
/*
- * Start doing the clean up the the chip. Our clean up happens in multiple
+ * Start doing the clean up the chip. Our clean up happens in multiple
* stages and this is just the first.
*/
void hfi1_start_cleanup(struct hfi1_devdata *dd)
@@ -15336,7 +15294,7 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
init_completion(&dd->user_comp);
/* The user refcount starts with one to inidicate an active device */
- atomic_set(&dd->user_refcount, 1);
+ refcount_set(&dd->user_refcount, 1);
goto bail;
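Note: besides the SPDX conversion, the chip.c hunk moves dd->user_refcount from atomic_t to refcount_t, which saturates rather than wraps and warns on increment-from-zero. The usual open/close pattern around it, sketched against the user_comp completion initialized just above:

	refcount_set(&dd->user_refcount, 1);	/* device starts active */

	/* per-user open */
	if (!refcount_inc_not_zero(&dd->user_refcount))
		return -ENXIO;			/* device already draining */

	/* per-user close */
	if (refcount_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);	/* last reference gone */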
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index ac26649d4463..b2d53713da58 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1,52 +1,10 @@
-#ifndef _CHIP_H
-#define _CHIP_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _CHIP_H
+#define _CHIP_H
/*
* This file contains all of the defines that are specific to the HFI chip
*/
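
The license-block removals throughout this series follow the kernel's SPDX convention: roughly forty lines of dual BSD/GPLv2 boilerplate collapse into one machine-readable tag on the first line of each file. As the hunks here show, .c files take the C99 // comment form while headers keep the classic /* */ form (per the kernel's license-rules documentation, headers may be consumed in contexts where // comments are not guaranteed to parse). A demonstrative snippet of the two forms used in this series:

```c
/* First line of a .c source file: */
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* First line of a .h header file: */
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
```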
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index fb3ec9bff7a2..95a8d530d554 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -1,53 +1,11 @@
-#ifndef DEF_CHIP_REG
-#define DEF_CHIP_REG
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_CHIP_REG
+#define DEF_CHIP_REG
+
#define CORE 0x000000000000
#define CCE (CORE + 0x000000000000)
#define ASIC (CORE + 0x000000400000)
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index ff423e546b80..995991d9709d 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _COMMON_H
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 2ced236e1553..22a3cdb940be 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -1,49 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
@@ -1358,7 +1317,7 @@ static void _driver_stats_seq_stop(struct seq_file *s, void *v)
{
}
-static u64 hfi1_sps_ints(void)
+static void hfi1_sps_show_ints(struct seq_file *s)
{
unsigned long index, flags;
struct hfi1_devdata *dd;
@@ -1369,24 +1328,19 @@ static u64 hfi1_sps_ints(void)
sps_ints += get_all_cpu_total(dd->int_counter);
}
xa_unlock_irqrestore(&hfi1_dev_table, flags);
- return sps_ints;
+ seq_write(s, &sps_ints, sizeof(u64));
}
static int _driver_stats_seq_show(struct seq_file *s, void *v)
{
loff_t *spos = v;
- char *buffer;
u64 *stats = (u64 *)&hfi1_stats;
- size_t sz = seq_get_buf(s, &buffer);
- if (sz < sizeof(u64))
- return SEQ_SKIP;
/* special case for interrupts */
if (*spos == 0)
- *(u64 *)buffer = hfi1_sps_ints();
+ hfi1_sps_show_ints(s);
else
- *(u64 *)buffer = stats[*spos];
- seq_commit(s, sizeof(u64));
+ seq_write(s, stats + *spos, sizeof(u64));
return 0;
}
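
This debugfs hunk swaps direct buffer surgery (seq_get_buf()/seq_commit() with a manual size check and SEQ_SKIP fallback) for seq_write(), which copies the bytes into the seq_file buffer and tracks overflow itself. A minimal sketch of the resulting show-callback shape; demo_stats_show() is a hypothetical stand-in for the driver's _driver_stats_seq_show():

```c
#include <linux/seq_file.h>
#include <linux/types.h>

static int demo_stats_show(struct seq_file *s, void *v)
{
	u64 value = *(u64 *)v;

	/* seq_write() appends raw bytes to the seq_file buffer and
	 * flags the buffer as full when they do not fit, so no manual
	 * seq_get_buf()/seq_commit() pairing is needed. */
	seq_write(s, &value, sizeof(value));
	return 0;
}
```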
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index 57e582caa5eb..29a5a8de2c41 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -1,52 +1,11 @@
-#ifndef _HFI1_DEBUGFS_H
-#define _HFI1_DEBUGFS_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016, 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_DEBUGFS_H
+#define _HFI1_DEBUGFS_H
+
struct hfi1_ibdev;
#define DEBUGFS_SEQ_FILE_OPS(name) \
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index bbb6069dec2a..68a184c39941 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/cdev.h>
diff --git a/drivers/infiniband/hw/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
index c3ec19cb0ac9..c371b5612b6b 100644
--- a/drivers/infiniband/hw/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -1,52 +1,11 @@
-#ifndef _HFI1_DEVICE_H
-#define _HFI1_DEVICE_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_DEVICE_H
+#define _HFI1_DEVICE_H
+
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index f88bb4af245f..de411884386b 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/spinlock.h>
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index c22ab7b5163b..f275dd1abed8 100644
--- a/drivers/infiniband/hw/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/ctype.h>
diff --git a/drivers/infiniband/hw/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
index 94e9e70de568..5ebc2f07bbef 100644
--- a/drivers/infiniband/hw/hfi1/efivar.h
+++ b/drivers/infiniband/hw/hfi1/efivar.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _HFI1_EFIVAR_H
#define _HFI1_EFIVAR_H
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index 1613af1c58d9..fbe958107457 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -1,49 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/delay.h>
#include "hfi.h"
#include "common.h"
diff --git a/drivers/infiniband/hw/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
index e774184f1643..772c516366ce 100644
--- a/drivers/infiniband/hw/hfi1/eprom.h
+++ b/drivers/infiniband/hw/hfi1/eprom.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
struct hfi1_devdata;
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index a414214f6035..b86f697c7956 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "exp_rcv.h"
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
index f25362015095..c6291bbf723c 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -1,52 +1,10 @@
-#ifndef _HFI1_EXP_RCV_H
-#define _HFI1_EXP_RCV_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_EXP_RCV_H
+#define _HFI1_EXP_RCV_H
#include "hfi.h"
#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 0dfbcfb048ca..e2e4f9f6fae2 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -1,49 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
diff --git a/drivers/infiniband/hw/hfi1/fault.h b/drivers/infiniband/hw/hfi1/fault.h
index a83382700a7c..7fe7f47219db 100644
--- a/drivers/infiniband/hw/hfi1/fault.h
+++ b/drivers/infiniband/hw/hfi1/fault.h
@@ -1,51 +1,11 @@
-#ifndef _HFI1_FAULT_H
-#define _HFI1_FAULT_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
+#ifndef _HFI1_FAULT_H
+#define _HFI1_FAULT_H
+
#include <linux/fault-inject.h>
#include <linux/dcache.h>
#include <linux/bitops.h>
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 955c3637980e..1783a6ea5427 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,50 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
@@ -194,7 +153,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
return -EINVAL;
- if (!atomic_inc_not_zero(&dd->user_refcount))
+ if (!refcount_inc_not_zero(&dd->user_refcount))
return -ENXIO;
/* The real work is performed later in assign_ctxt() */
@@ -213,7 +172,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
nomem:
kfree(fd);
fp->private_data = NULL;
- if (atomic_dec_and_test(&dd->user_refcount))
+ if (refcount_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
return -ENOMEM;
}
@@ -711,7 +670,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
deallocate_ctxt(uctxt);
done:
- if (atomic_dec_and_test(&dd->user_refcount))
+ if (refcount_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
cleanup_srcu_struct(&fdata->pq_srcu);
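
These file_ops hunks supply the open/close half of the same refcount_t pattern: open only succeeds while the count is still nonzero, and the decrement that takes it to zero wakes the removal path. Continuing the hypothetical demo_* sketch from earlier:

```c
#include <linux/errno.h>

/* open(): admit a client only while the device reference is live. */
static int demo_open(struct demo_dev *d)
{
	if (!refcount_inc_not_zero(&d->user_refcount))
		return -ENXIO;	/* device is already being torn down */
	return 0;
}

/* close(): the drop that takes the count to zero signals waiters. */
static void demo_close(struct demo_dev *d)
{
	if (refcount_dec_and_test(&d->user_refcount))
		complete(&d->user_comp);
}
```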
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 2cf102b5abd4..31e63e245ea9 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/firmware.h>
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 31664f43c27f..7fa9cd39254f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,53 +1,13 @@
-#ifndef _HFI1_KERNEL_H
-#define _HFI1_KERNEL_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_KERNEL_H
+#define _HFI1_KERNEL_H
+
+#include <linux/refcount.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -1384,7 +1344,7 @@ struct hfi1_devdata {
/* Number of verbs contexts which have disabled ASPM */
atomic_t aspm_disabled_cnt;
/* Keeps track of user space clients */
- atomic_t user_refcount;
+ refcount_t user_refcount;
/* Used to wait for outstanding user space clients before dev removal */
struct completion user_comp;
@@ -2601,7 +2561,7 @@ static inline bool hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
/*
- * Return a 16B header type if either the the destination
+ * Return a 16B header type if either the destination
* or source lid is extended.
*/
if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
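
The comment fixed above documents the header-format choice in hfi1_get_hdr_type(). Only a fragment of that function is visible in this hunk, so the following restatement of the rule is an assumption about its overall shape (using identifiers from the visible context), not the patch's code:

```c
/* Hypothetical restatement: choose a 16B header when either end of
 * the path carries an extended LID, otherwise fall back to 9B.
 * Assumes the hfi1 driver's hfi.h definitions are in scope. */
static inline u32 demo_hdr_type(u32 slid, struct rdma_ah_attr *attr)
{
	if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B ||
	    hfi1_get_packet_type(slid) == HFI1_PKT_TYPE_16B)
		return HFI1_PKT_TYPE_16B;

	return HFI1_PKT_TYPE_9B;
}
```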
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 0986aa065418..e3679d076eaa 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/pci.h>
@@ -650,12 +608,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
-
- if (loopback) {
- dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
- !default_pkey_idx);
- ppd->pkeys[!default_pkey_idx] = 0x8001;
- }
+ ppd->pkeys[0] = 0x8001;
INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
INIT_WORK(&ppd->link_up_work, handle_link_up);
@@ -1752,7 +1705,7 @@ static void wait_for_clients(struct hfi1_devdata *dd)
* Remove the device init value and complete the device if there are
* no clients, or wait for active clients to finish.
*/
- if (atomic_dec_and_test(&dd->user_refcount))
+ if (refcount_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
wait_for_completion(&dd->user_comp);
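
The removal side completes the pattern: the driver drops the reference it took at init time, then blocks until the last client fires the completion. A sketch under the same hypothetical demo_* names:

```c
/* Removal path: drop the driver's own init reference, then wait for
 * any remaining clients. This is safe even when no client was ever
 * open: the init reference itself reaches zero and completes. */
static void demo_wait_for_clients(struct demo_dev *d)
{
	if (refcount_dec_and_test(&d->user_refcount))
		complete(&d->user_comp);
	wait_for_completion(&d->user_comp);
}
```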
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 5ba5c11459e7..70376e6dba33 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/pci.h>
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index cda81a7843c2..4df0700cbaba 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -1,52 +1,11 @@
-#ifndef _HFI1_IOWAIT_H
-#define _HFI1_IOWAIT_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_IOWAIT_H
+#define _HFI1_IOWAIT_H
+
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
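
A note on the tag styles used throughout this series: per
Documentation/process/license-rules.rst, C source files carry the SPDX tag
in a C99-style comment on line 1, while C headers use a block comment, e.g.:

	// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause	(in a .c file)
	/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */	(in a .h file)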
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 993f9838b6c8..e74ddbe46589 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -644,10 +644,13 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
/* came from non-list submit */
list_add_tail(&txreq->list, &txq->tx_list);
if (list_empty(&txq->wait.list)) {
+ struct hfi1_ibport *ibp = &sde->ppd->ibport_data;
+
if (!atomic_xchg(&txq->no_desc, 1)) {
trace_hfi1_txq_queued(txq);
hfi1_ipoib_stop_txq(txq);
}
+ ibp->rvp.n_dmawait++;
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
}
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 1fe5e702f31d..4146a2113a95 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/net.h>
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 0205d308ef5e..1d45a008fa7f 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _HFI1_MAD_H
#define _HFI1_MAD_H
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index d213f65d4cdd..876cc78a22cc 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -1,50 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 423aacc67e94..7417be2b9dc8 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -1,50 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _HFI1_MMU_RB_H
#define _HFI1_MMU_RB_H
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index 57a5f02ebc77..77d2ece9a9cb 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -1,49 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Copyright(c) 2018 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "hfi.h"
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index e63e944bf0fc..9530ccb0a2ce 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,50 +1,8 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef _HFI1_MSIX_H
#define _HFI1_MSIX_H
diff --git a/drivers/infiniband/hw/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
index 774215b95df5..31570b0cfd18 100644
--- a/drivers/infiniband/hw/hfi1/opa_compat.h
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
@@ -1,52 +1,10 @@
-#ifndef _LINUX_H
-#define _LINUX_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _LINUX_H
+#define _LINUX_H
/*
* This header file is for OPA-specific definitions which are
* required by the HFI driver, and which aren't yet in the Linux
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 6f06e9920503..a0802332c8cb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/pci.h>
@@ -92,25 +50,18 @@ int hfi1_pcie_init(struct hfi1_devdata *dd)
goto bail;
}
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
/*
* If the 64 bit setup fails, try 32 bit. Some systems
* do not setup 64 bit maps on systems with 2GB or less
* memory installed.
*/
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- }
- if (ret) {
- dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
- goto bail;
}
pci_set_master(pdev);
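
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in a
single call, which is what lets the hunk above drop the separate
pci_set_consistent_dma_mask() legs. A minimal sketch of the 64-then-32
fallback in a probe path, assuming only a valid struct pci_dev *pdev:

	#include <linux/dma-mapping.h>

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* neither mask usable: no DMA addressing */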
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index e276522104c6..489b436f19bb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 9e5f08d2b985..ea714008f261 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -1,52 +1,10 @@
-#ifndef _PIO_H
-#define _PIO_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015-2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _PIO_H
+#define _PIO_H
/* send context types */
#define SC_KERNEL 0
#define SC_VL15 1
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 14bfd8287f4a..136f9a99e1e0 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "hfi.h"
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 4642d6ceb890..54cbd8f1a6c1 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/firmware.h>
diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index eed0aa9124fa..1d51dca1bc30 100644
--- a/drivers/infiniband/hw/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#ifndef __PLATFORM_H
#define __PLATFORM_H
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index e037df911512..6193d48b2c1f 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/err.h>
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index b0d053d12129..cdf87bc6ad94 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -1,52 +1,10 @@
-#ifndef _QP_H
-#define _QP_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _QP_H
+#define _QP_H
#include <linux/hash.h>
#include <rdma/rdmavt_qp.h>
#include "verbs.h"
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 38f311f855b5..19d7887a4f10 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index 36cf52359848..8f14111eaa47 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/* QSFP support common definitions, for hfi driver */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0174b8ee9f00..acd2b273ea7d 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/io.h>
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index c3fa1814c6a8..b0151b7293f5 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/spinlock.h>
@@ -459,7 +417,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
* send engine
* @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
- * the the send engine progress
+ * the send engine progress
* @tid: true if it is the tid leg
*
* This routine checks if the time slice for the QP has expired
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index e83dc562629e..2b6c24b7b586 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/spinlock.h>
@@ -1860,7 +1818,7 @@ retry:
/*
* The SDMA idle interrupt is not guaranteed to be ordered with respect
- * to updates to the the dma_head location in host memory. The head
+ * to updates to the dma_head location in host memory. The head
* value read might not be fully up to date. If there are pending
* descriptors and the SDMA idle interrupt fired then read from the
* CSR SDMA head instead to get the latest value from the hardware.
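
The reworded comment describes a real ordering hazard: the engine's DMA
write of the head index into host memory is not ordered against the idle
interrupt, so the cached value can lag. A hedged sketch of the fallback it
describes (read_sde_csr() and the field names here are illustrative, not
necessarily the driver's exact API):

	u16 head = le16_to_cpu(*sde->head_dma);		/* may be stale */
	if (descs_pending && idle_interrupt_fired)
		head = read_sde_csr(sde, SD(HEAD));	/* authoritative read */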
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index f57d55272dd2..d8170fcbfbdd 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1,52 +1,11 @@
-#ifndef _HFI1_SDMA_H
-#define _HFI1_SDMA_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_SDMA_H
+#define _HFI1_SDMA_H
+
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
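[Editor's note] A pattern worth calling out once, since it repeats through every hunk below: the kernel's license-rules documentation wants the SPDX tag on the very first line, written as a C99 comment in .c files and as a block comment in headers (which can be included in contexts where // is unsafe). That is why sdma.c and sdma.h above receive different tag styles, and why the #ifndef guard moves below the tag:

	/* Line 1 of a C source file (sdma.c above): */
	// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

	/* Line 1 of a header file (sdma.h above): */
	/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

In both cases the one-line tag replaces the ~45-line dual BSD/GPLv2 boilerplate while leaving the copyright notice in place.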
diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index 514a4784566b..e262fb5c5ec6 100644
--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef HFI1_SDMA_TXREQ_H
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index acfcbedebe0d..3b3407dc7c21 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -1,49 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/ctype.h>
#include <rdma/ib_sysfs.h>
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 233ea48b72c8..2a7abf7a1f7f 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -605,7 +605,7 @@ static void __trigger_tid_waiter(struct rvt_qp *qp)
* to this call via first_qp().
*
* If the qp trigger was already scheduled (!rval)
- * the the reference is dropped, otherwise the resume
+ * the reference is dropped, otherwise the resume
* or the destroy cancel will dispatch the reference.
*/
static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
@@ -5174,7 +5174,7 @@ bail_no_tx:
priv->s_flags &= ~RVT_S_BUSY;
/*
* If we didn't get a txreq, the QP will be woken up later to try
- * again, set the flags to the the wake up which work item to wake
+ * again, set the flags to the wake up which work item to wake
* up.
* (A better algorithm should be found to do this and generalize the
* sleep/wakeup flags.)
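[Editor's note] The refcount comment corrected above follows a common deferred-work idiom: take a reference before scheduling, and drop it immediately if the work was already queued, since the earlier submission's reference will cover the eventual run. A hedged sketch using the generic workqueue API — hfi1's real path goes through its own TID wakeup trigger, and to_priv()/tid_work here are hypothetical:

	/* Illustrative only; not the hfi1 trigger path. */
	static void schedule_tid_wakeup(struct rvt_qp *qp)
	{
		rvt_get_qp(qp);		/* reference travels with the work */
		if (!queue_work(system_highpri_wq, &to_priv(qp)->tid_work))
			rvt_put_qp(qp);	/* already queued: drop ours now */
	}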
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 715c81308b85..8302469582c6 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -530,7 +488,7 @@ struct hfi1_ctxt_hist {
atomic_t data[255];
};
-struct hfi1_ctxt_hist hist = {
+static struct hfi1_ctxt_hist hist = {
.count = ATOMIC_INIT(0)
};
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 1ce551864118..31e027c5a0c0 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index d8c168dc3ea8..1858eaf33b18 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
-*
-* This file is provided under a dual BSD/GPLv2 license. When using or
-* redistributing this file, you may do so under either license.
-*
-* GPL LICENSE SUMMARY
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* BSD LICENSE
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of Intel Corporation nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
*/
+
#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_CTXTS_H
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index de7a87392b8d..707f1053f0b7 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
-*
-* This file is provided under a dual BSD/GPLv2 license. When using or
-* redistributing this file, you may do so under either license.
-*
-* GPL LICENSE SUMMARY
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* BSD LICENSE
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of Intel Corporation nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
*/
+
#if !defined(__HFI1_TRACE_EXTRA_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_EXTRA_H
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 2f84290a88ca..b33f8f575f8a 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#if !defined(__HFI1_TRACE_IBHDRS_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_IBHDRS_H
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
index 93338988b922..742675fa7576 100644
--- a/drivers/infiniband/hw/hfi1/trace_misc.h
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
-*
-* This file is provided under a dual BSD/GPLv2 license. When using or
-* redistributing this file, you may do so under either license.
-*
-* GPL LICENSE SUMMARY
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* BSD LICENSE
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of Intel Corporation nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
*/
+
#if !defined(__HFI1_TRACE_MISC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_MISC_H
diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
index 3b7abbc382c2..187e9244fe5e 100644
--- a/drivers/infiniband/hw/hfi1/trace_mmu.h
+++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#if !defined(__HFI1_TRACE_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_MMU_H
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
index 5f49e1eeb211..7c3a1c77536d 100644
--- a/drivers/infiniband/hw/hfi1/trace_rc.h
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015, 2016, 2017 Intel Corporation.
-*
-* This file is provided under a dual BSD/GPLv2 license. When using or
-* redistributing this file, you may do so under either license.
-*
-* GPL LICENSE SUMMARY
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* BSD LICENSE
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of Intel Corporation nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
*/
+
#if !defined(__HFI1_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_RC_H
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 168079ed122c..0da22f9bc75e 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -1,49 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_RX_H
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index f1922a7619fe..7318aa6150b5 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5b0f536b34e0..4e9d6aa39305 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "hfi.h"
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 6ecb984c85fa..b64b9d7e08f0 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/net.h>
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 58dcab2679d9..0c86e9d354f8 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -1,49 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <asm/page.h>
#include <linux/string.h>
@@ -177,8 +135,8 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
struct mm_struct *mm;
if (mapped) {
- pci_unmap_single(dd->pcidev, node->dma_addr,
- node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
+ node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
pages = &node->pages[idx];
mm = mm_from_tid_node(node);
} else {
@@ -739,9 +697,8 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
if (!node)
return -ENOMEM;
- phys = pci_map_single(dd->pcidev,
- __va(page_to_phys(pages[0])),
- npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
+ npages * PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&dd->pcidev->dev, phys)) {
dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
phys);
@@ -783,8 +740,8 @@ out_unmap:
hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
node->rcventry, node->notifier.interval_tree.start,
node->phys, ret);
- pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
+ DMA_FROM_DEVICE);
kfree(node);
return -EFAULT;
}
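[Editor's note] Besides the license cleanup, the user_exp_rcv.c hunks above convert the deprecated pci_* DMA wrappers to the generic DMA API; the calls map one-to-one (pci_map_single on a pci_dev becomes dma_map_single on its embedded struct device, and PCI_DMA_FROMDEVICE becomes DMA_FROM_DEVICE). A minimal before/after sketch, with buffer and length names as placeholders:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/pci.h>

	/* Old style (wrapper since removed from the kernel):
	 *	addr = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	 * New style, as used in the hunks above:
	 */
	static int map_exp_rcv_buf(struct pci_dev *pdev, void *buf,
				   size_t len, dma_addr_t *addr)
	{
		*addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, *addr))
			return -EFAULT;	/* matches the hunk's error return */
		return 0;
	}

The unmap side converts the same way: dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE) replaces the pci_unmap_single() call.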
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index d45c7b6988d4..8c53e416bf84 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -1,53 +1,12 @@
-#ifndef _HFI1_USER_EXP_RCV_H
-#define _HFI1_USER_EXP_RCV_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_USER_EXP_RCV_H
+#define _HFI1_USER_EXP_RCV_H
+
#include "hfi.h"
#include "exp_rcv.h"
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 3b505006c0a6..7bce963e2ae6 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/mm.h>
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index da5b2e37355a..5b11c8282744 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,50 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index fabe58139906..ea56eb57e656 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,52 +1,11 @@
-#ifndef _HFI1_USER_SDMA_H
-#define _HFI1_USER_SDMA_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_USER_SDMA_H
+#define _HFI1_USER_SDMA_H
+
#include <linux/device.h>
#include <linux/wait.h>
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9b198c35e1a1..26bea51869bf 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 420df17cd184..38565532d654 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef HFI1_VERBS_H
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 8f766dd3f61c..cfecc81a27c7 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "hfi.h"
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 4bdfc7932376..2a7e0ae892e9 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef HFI1_VERBS_TXREQ_H
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index a7a450e2cf2c..34f03e7770be 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -1,52 +1,10 @@
-#ifndef _HFI1_VNIC_H
-#define _HFI1_VNIC_H
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2017 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _HFI1_VNIC_H
+#define _HFI1_VNIC_H
#include <rdma/opa_vnic.h>
#include "hfi.h"
#include "sdma.h"
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 7e79c0578ecf..3650fababf25 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2017 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/*
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index 7d90b900131b..c3f0f8d877c3 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2017 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/*
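
Note on the hfi1 hunks above: each one collapses roughly forty lines of dual BSD/GPLv2 license boilerplate into a single SPDX identifier. The series follows the kernel's license-rules convention for comment style: .c files take the C++-style tag, while .h files keep a C block comment so they remain safe to include from contexts that reject "//" comments. The two resulting forms, side by side:

    // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause     <- first line of a .c file
    /* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */  <- first line of a .h file
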
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 1b02d3bc9bae..d4fa0fd52294 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -36,76 +36,6 @@
#include "hns_roce_device.h"
#include <rdma/ib_umem.h>
-int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
-{
- int ret = 0;
-
- spin_lock(&bitmap->lock);
- *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
- if (*obj >= bitmap->max) {
- bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
- & bitmap->mask;
- *obj = find_first_zero_bit(bitmap->table, bitmap->max);
- }
-
- if (*obj < bitmap->max) {
- set_bit(*obj, bitmap->table);
- bitmap->last = (*obj + 1);
- if (bitmap->last == bitmap->max)
- bitmap->last = 0;
- *obj |= bitmap->top;
- } else {
- ret = -EINVAL;
- }
-
- spin_unlock(&bitmap->lock);
-
- return ret;
-}
-
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
-{
- obj &= bitmap->max + bitmap->reserved_top - 1;
-
- spin_lock(&bitmap->lock);
- clear_bit(obj, bitmap->table);
-
- bitmap->last = min(bitmap->last, obj);
- bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
- & bitmap->mask;
- spin_unlock(&bitmap->lock);
-}
-
-int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
- u32 reserved_bot, u32 reserved_top)
-{
- u32 i;
-
- if (num != roundup_pow_of_two(num))
- return -EINVAL;
-
- bitmap->last = 0;
- bitmap->top = 0;
- bitmap->max = num - reserved_top;
- bitmap->mask = mask;
- bitmap->reserved_top = reserved_top;
- spin_lock_init(&bitmap->lock);
- bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
- GFP_KERNEL);
- if (!bitmap->table)
- return -ENOMEM;
-
- for (i = 0; i < reserved_bot; ++i)
- set_bit(i, bitmap->table);
-
- return 0;
-}
-
-void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
-{
- kfree(bitmap->table);
-}
-
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
struct hns_roce_buf_list *trunks;
@@ -248,10 +178,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
ida_destroy(&hr_dev->xrcd_ida.ida);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
- hns_roce_cleanup_srq_table(hr_dev);
+ ida_destroy(&hr_dev->srq_table.srq_ida.ida);
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
ida_destroy(&hr_dev->pd_ida.ida);
- hns_roce_cleanup_uar_table(hr_dev);
+ ida_destroy(&hr_dev->uar_ida.ida);
}
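
This hunk deletes the driver's hand-rolled wrap-around bitmap allocator; the hns_roce_pd.c and hns_roce_srq.c hunks further down replace its users with the kernel's IDA. The conversion pattern is roughly the sketch below, where tbl stands in for the driver's struct hns_roce_ida (an ida paired with min/max bounds). One practical payoff: ida_init() cannot fail, which is why the *_init_*_table() helpers change from int to void.

    ida_init(&tbl->ida);                 /* no allocation, cannot fail */
    tbl->min = reserved;                 /* lowest usable index */
    tbl->max = num_entries - 1;          /* highest usable index */

    /* allocate: returns an id in [min, max] or a negative errno */
    id = ida_alloc_range(&tbl->ida, tbl->min, tbl->max, GFP_KERNEL);
    if (id < 0)
            return -ENOMEM;

    ida_free(&tbl->ida, id);             /* release one id */
    ida_destroy(&tbl->ida);              /* teardown */
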
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index d40ea3d87260..751470c7a2ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -42,8 +42,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
found:
offset = virt - page_addr;
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
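
The db.c change tracks an RDMA core rework of struct ib_umem: the scatterlist moved from the plain sg_table at umem->sg_head into an append-able table at umem->sgt_append.sgt, so every open-coded umem->sg_head.sgl access must be updated (the mlx4 and mlx5 doorbell hunks below make the same substitution). For a driver that only needs the first DMA address, the access now looks like:

    struct scatterlist *sgl = umem->sgt_append.sgt.sgl;

    db->dma = sg_dma_address(sgl) + offset;

Full iteration switches from for_each_sg(umem->sg_head.sgl, ...) to for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i), as the mlx4 mr.c hunk later in this series shows.
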
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 991f65269fa6..9467c39e3d28 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -496,6 +496,12 @@ struct hns_roce_bank {
u32 next; /* Next ID to allocate. */
};
+struct hns_roce_idx_table {
+ u32 *spare_idx;
+ u32 head;
+ u32 tail;
+};
+
struct hns_roce_qp_table {
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
@@ -504,6 +510,7 @@ struct hns_roce_qp_table {
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
struct mutex bank_mutex;
+ struct hns_roce_idx_table idx_table;
};
struct hns_roce_cq_table {
@@ -514,7 +521,7 @@ struct hns_roce_cq_table {
};
struct hns_roce_srq_table {
- struct hns_roce_bitmap bitmap;
+ struct hns_roce_ida srq_ida;
struct xarray xa;
struct hns_roce_hem_table table;
};
@@ -963,7 +970,7 @@ struct hns_roce_dev {
struct hns_roce_cmdq cmd;
struct hns_roce_ida pd_ida;
struct hns_roce_ida xrcd_ida;
- struct hns_roce_uar_table uar_table;
+ struct hns_roce_ida uar_ida;
struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table;
struct hns_roce_srq_table srq_table;
@@ -1118,10 +1125,8 @@ static inline u8 get_tclass(const struct ib_global_route *grh)
grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
-int hns_roce_init_uar_table(struct hns_roce_dev *dev);
+void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
@@ -1146,20 +1151,14 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
-int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
-int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
- u32 reserved_bot, u32 reserved_top);
-void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
@@ -1216,9 +1215,8 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
-struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index a3305d196675..e0f59b8d7d5d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -758,7 +758,7 @@ static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
- qp = hns_roce_create_qp(pd, &init_attr, NULL);
+ qp = ib_create_qp(pd, &init_attr);
if (IS_ERR(qp)) {
dev_err(dev, "Create loop qp for mr free failed!");
return NULL;
@@ -923,7 +923,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
create_lp_qp_failed:
for (i -= 1; i >= 0; i--) {
hr_qp = free_mr->mr_free_qp[i];
- if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
+ if (ib_destroy_qp(&hr_qp->ibqp))
dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
}
@@ -953,7 +953,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
if (!hr_qp)
continue;
- ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
+ ret = ib_destroy_qp(&hr_qp->ibqp);
if (ret)
dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
i, ret);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 594d4cef31b3..5b9953105752 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1248,8 +1248,7 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
{
memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flag =
- cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+ desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
if (is_read)
desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
else
@@ -1288,16 +1287,11 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
/* Write to hardware */
roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
- do {
- if (hns_roce_cmq_csq_done(hr_dev))
- break;
- udelay(1);
- } while (++timeout < priv->cmq.tx_timeout);
- }
+ do {
+ if (hns_roce_cmq_csq_done(hr_dev))
+ break;
+ udelay(1);
+ } while (++timeout < priv->cmq.tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
for (ret = 0, i = 0; i < num; i++) {
@@ -1761,8 +1755,7 @@ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
if (ret)
return ret;
- desc.flag =
- cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+ desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
@@ -2004,6 +1997,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
} else {
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -4114,6 +4108,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hr_reg_enable(context, QPC_RQ_RECORD_EN);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ hr_reg_enable(context, QPC_OWNER_MODE);
+
hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
lower_32_bits(hr_qp->rdb.dma) >> 1);
hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
@@ -4146,8 +4143,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-
/*
* In v2 engine, software passes context and context mask to hardware
* when modifying qp. If software needs to modify some fields in context,
@@ -4172,11 +4167,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
hr_reg_clear(qpc_mask, QPC_SRQN);
}
-
- if (attr_mask & IB_QP_DEST_QPN) {
- hr_reg_write(context, QPC_DQPN, hr_qp->qpn);
- hr_reg_clear(qpc_mask, QPC_DQPN);
- }
}
static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
@@ -4486,9 +4476,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
- hr_reg_write(context, QPC_LSN, 0x100);
- hr_reg_clear(qpc_mask, QPC_LSN);
-
hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
return 0;
@@ -4507,15 +4494,23 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
+ u32 *head = &hr_dev->qp_table.idx_table.head;
+ u32 *tail = &hr_dev->qp_table.idx_table.tail;
struct hns_roce_dip *hr_dip;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+ spare_idx[*tail] = ibqp->qp_num;
+ *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
+
list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
- if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16))
+ if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
+ *dip_idx = hr_dip->dip_idx;
goto out;
+ }
}
/* If no dgid is found, a new dip and a mapping between dgid and
@@ -4528,7 +4523,8 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+ hr_dip->dip_idx = *dip_idx = spare_idx[*head];
+ *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
list_add_tail(&hr_dip->node, &hr_dev->dip_list);
out:
@@ -5127,7 +5123,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
- qp_attr->dest_qp_num = (u8)hr_reg_read(&context, QPC_DQPN);
+ qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
qp_attr->qp_access_flags =
((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
@@ -5224,7 +5220,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
if (send_cq && send_cq != recv_cq)
__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
-
}
hns_roce_qp_remove(hr_dev, hr_qp);
@@ -6118,35 +6113,32 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
if (ret) {
- dev_err(dev, "eq create failed.\n");
+ dev_err(dev, "failed to create eq.\n");
goto err_create_eq_fail;
}
}
- /* enable irq */
- hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+ hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
+ if (!hr_dev->irq_workq) {
+ dev_err(dev, "failed to create irq workqueue.\n");
+ ret = -ENOMEM;
+ goto err_create_eq_fail;
+ }
- ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
- aeq_num, other_num);
+ ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
+ other_num);
if (ret) {
- dev_err(dev, "Request irq failed.\n");
+ dev_err(dev, "failed to request irq.\n");
goto err_request_irq_fail;
}
- hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
- if (!hr_dev->irq_workq) {
- dev_err(dev, "Create irq workqueue failed!\n");
- ret = -ENOMEM;
- goto err_create_wq_fail;
- }
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
return 0;
-err_create_wq_fail:
- __hns_roce_free_irq(hr_dev);
-
err_request_irq_fail:
- hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+ destroy_workqueue(hr_dev->irq_workq);
err_create_eq_fail:
for (i -= 1; i >= 0; i--)
@@ -6367,7 +6359,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
-
return 0;
reset_chk_err:
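
Two behavioural changes in the hw_v2.c hunks are easy to miss. First, the NO_INTR flag is gone because every CMQ command was already sent with it set, so the send path now polls for completion unconditionally. Second, get_dip_ctx_idx() no longer uses the QPN itself as the DIP context index: QPNs can be recycled quickly after a QP is destroyed, which risked handing out an index whose old DIP context was still live. Candidate indexes instead pass through a spare_idx FIFO sized to num_qps — QPNs are enqueued at *tail on every call, and a new dip_idx is dequeued from *head only when no matching dgid exists. A condensed sketch of the ring arithmetic, assuming the dip_list_lock shown above is held:

    spare_idx[*tail] = ibqp->qp_num;        /* enqueue candidate */
    *tail = (*tail + 1) % num_qps;          /* same wrap as the open-coded form */

    *dip_idx = spare_idx[*head];            /* dequeue oldest candidate */
    *head = (*head + 1) % num_qps;

The same hunk also fixes a plain bug: on a dgid match, *dip_idx was previously never assigned before jumping to out.
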
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index b8a09d411e2e..4d904d5e82be 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -129,19 +129,13 @@
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
-#define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT 0
-#define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HNS_ROCE_CMD_FLAG_NEXT_SHIFT 2
-#define HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT 4
-#define HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HNS_ROCE_CMD_FLAG_IN BIT(HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT)
-#define HNS_ROCE_CMD_FLAG_OUT BIT(HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT)
-#define HNS_ROCE_CMD_FLAG_NEXT BIT(HNS_ROCE_CMD_FLAG_NEXT_SHIFT)
-#define HNS_ROCE_CMD_FLAG_WR BIT(HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HNS_ROCE_CMD_FLAG_NO_INTR BIT(HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT)
-#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
+enum {
+ HNS_ROCE_CMD_FLAG_IN = BIT(0),
+ HNS_ROCE_CMD_FLAG_OUT = BIT(1),
+ HNS_ROCE_CMD_FLAG_NEXT = BIT(2),
+ HNS_ROCE_CMD_FLAG_WR = BIT(3),
+ HNS_ROCE_CMD_FLAG_ERR_INTR = BIT(5),
+};
#define HNS_ROCE_CMQ_DESC_NUM_S 3
@@ -1413,7 +1407,6 @@ struct hns_roce_cmq_desc {
__le32 rsv[4];
} func_info;
};
-
};
struct hns_roce_v2_cmq_ring {
@@ -1447,7 +1440,7 @@ struct hns_roce_v2_priv {
struct hns_roce_dip {
u8 dgid[GID_LEN_V2];
- u8 dip_idx;
+ u32 dip_idx;
struct list_head node; /* all dips are on a list */
};
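
In the header, six #define shift/BIT pairs collapse into a single anonymous enum of BIT() values, dropping the now-unused NO_INTR bit, and dip_idx widens from u8 to u32 so it can hold any value produced by the spare_idx ring (QP numbers do not fit in a byte). The enum-of-BIT() idiom is equivalent to the macros but keeps the flags visible as symbols in debug info; generically (names here are illustrative):

    enum {
            MY_CMD_FLAG_IN  = BIT(0),
            MY_CMD_FLAG_OUT = BIT(1),  /* was: #define plus a separate _SHIFT macro */
    };
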
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cc6eab14a222..5d39bd08582a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -325,7 +325,7 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0;
error_fail_copy_to_udata:
- hns_roce_uar_free(hr_dev, &context->uar);
+ ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
error_fail_uar_alloc:
return ret;
@@ -334,8 +334,9 @@ error_fail_uar_alloc:
static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
- hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+ ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
static int hns_roce_mmap(struct ib_ucontext *context,
@@ -454,6 +455,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};
@@ -736,11 +738,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
mutex_init(&hr_dev->pgdir_mutex);
}
- ret = hns_roce_init_uar_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to initialize uar table. aborting\n");
- return ret;
- }
+ hns_roce_init_uar_table(hr_dev);
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) {
@@ -748,6 +746,12 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_table_free;
}
+ ret = hns_roce_init_qp_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init qp_table.\n");
+ goto err_uar_table_free;
+ }
+
hns_roce_init_pd_table(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
@@ -757,32 +761,14 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev);
- hns_roce_init_qp_table(hr_dev);
-
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
- ret = hns_roce_init_srq_table(hr_dev);
- if (ret) {
- dev_err(dev,
- "Failed to init share receive queue table.\n");
- goto err_qp_table_free;
- }
+ hns_roce_init_srq_table(hr_dev);
}
return 0;
-err_qp_table_free:
- hns_roce_cleanup_qp_table(hr_dev);
- hns_roce_cleanup_cq_table(hr_dev);
- ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
- ida_destroy(&hr_dev->xrcd_ida.ida);
-
- ida_destroy(&hr_dev->pd_ida.ida);
- hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
-
err_uar_table_free:
- hns_roce_cleanup_uar_table(hr_dev);
+ ida_destroy(&hr_dev->uar_ida.ida);
return ret;
}
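
The INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp) line added to hns_roce_dev_ops is the pivot of this part of the series: it tells the RDMA core the size of the driver's QP structure and the offset of the embedded ib_qp, so the core can allocate (and later free) the container itself. The driver callback therefore changes shape from returning a pointer to filling in a pre-allocated object — the same conversion is applied to irdma and mlx4 below. Schematically (the drv_* names are illustrative):

    /* before: driver allocates, returns ERR_PTR() on failure */
    struct ib_qp *drv_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *attr,
                                struct ib_udata *udata);

    /* after: core allocates; driver initializes and returns an errno */
    int drv_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                      struct ib_udata *udata)
    {
            struct drv_qp *qp = to_drv_qp(ibqp);  /* container_of() */

            /* ... setup; on error just return -Exxx, the core frees ibqp */
            return 0;
    }

This is also why the kzalloc()/kfree() pairs and the trailing kfree(hr_qp)/kfree(mqp) calls disappear from the create and destroy paths.
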
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 006c84bb3f9f..7089ac780291 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -352,7 +352,9 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
}
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
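
The rereg_user_mr change makes the success path explicit: for this callback the core treats a NULL return as "the MR was modified in place", and the old ERR_PTR(ret) with ret == 0 only happened to evaluate to NULL. Spelling it out avoids relying on that coincidence:

    if (ret)
            return ERR_PTR(ret);
    return NULL;    /* success: the existing ib_mr stays in use */
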
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index ea5663630985..81ffad77ae42 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -85,13 +85,18 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
+ struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
struct resource *res;
- int ret;
+ int id;
/* Using bitmap to manage UAR index */
- ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
- if (ret)
+ id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
return -ENOMEM;
+ }
+ uar->logic_idx = (unsigned long)id;
if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
uar->index = (uar->logic_idx - 1) %
@@ -102,6 +107,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
if (!dev_is_pci(hr_dev->dev)) {
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
if (!res) {
+ ida_free(&uar_ida->ida, id);
dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
return -EINVAL;
}
@@ -114,22 +120,13 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
return 0;
}
-void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx);
-}
+ struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
-int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
-{
- return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
- hr_dev->caps.num_uars,
- hr_dev->caps.num_uars - 1,
- hr_dev->caps.reserved_uars, 0);
-}
-
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
-{
- hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
+ ida_init(&uar_ida->ida);
+ uar_ida->max = hr_dev->caps.num_uars - 1;
+ uar_ida->min = hr_dev->caps.reserved_uars;
}
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index b101b7e578f2..9af4509894e6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -715,7 +715,6 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
/* allocate recv inline buf */
wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
GFP_KERNEL);
-
if (!wqe_list)
goto err;
@@ -823,77 +822,104 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
hns_roce_qp_has_rq(init_attr));
}
+static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to map user SQ doorbell, ret = %d.\n",
+ ret);
+ goto err_out;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ }
+
+ if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to map user RQ doorbell, ret = %d.\n",
+ ret);
+ goto err_sdb;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ }
+
+ return 0;
+
+err_sdb:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+ return ret;
+}
+
+static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->sq.db_reg = hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+ else
+ hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ if (kernel_qp_has_rdb(hr_dev, init_attr)) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc kernel RQ doorbell, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ *hr_qp->rdb.db_record = 0;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ }
+
+ return 0;
+}
+
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
- struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
- struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
if (udata) {
- if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
- ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr,
- &hr_qp->sdb);
- if (ret) {
- ibdev_err(ibdev,
- "failed to map user SQ doorbell, ret = %d.\n",
- ret);
- goto err_out;
- }
- hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
- resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
- }
-
- if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
- ret = hns_roce_db_map_user(uctx, ucmd->db_addr,
- &hr_qp->rdb);
- if (ret) {
- ibdev_err(ibdev,
- "failed to map user RQ doorbell, ret = %d.\n",
- ret);
- goto err_sdb;
- }
- hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
- resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
- }
+ ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
+ resp);
+ if (ret)
+ return ret;
} else {
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
- hr_qp->sq.db_reg = hr_dev->mem_base +
- HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
- else
- hr_qp->sq.db_reg =
- hr_dev->reg_base + hr_dev->sdb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
-
- hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
-
- if (kernel_qp_has_rdb(hr_dev, init_attr)) {
- ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc kernel RQ doorbell, ret = %d.\n",
- ret);
- goto err_out;
- }
- *hr_qp->rdb.db_record = 0;
- hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
- }
+ ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
+ if (ret)
+ return ret;
}
return 0;
-err_sdb:
- if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
- hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
-err_out:
- return ret;
}
static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -959,8 +985,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
- hr_qp->ibqp.qp_type = init_attr->qp_type;
-
if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
@@ -1073,6 +1097,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
if (udata) {
+ resp.cap_flags = hr_qp->en_flags;
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
if (ret) {
@@ -1121,8 +1146,6 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qp_buf(hr_dev, hr_qp);
free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata);
-
- kfree(hr_qp);
}
static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
@@ -1154,31 +1177,21 @@ out:
return -EOPNOTSUPP;
}
-struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+ struct ib_device *ibdev = qp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
- struct hns_roce_qp *hr_qp;
+ struct hns_roce_qp *hr_qp = to_hr_qp(qp);
+ struct ib_pd *pd = qp->pd;
int ret;
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
if (ret)
- return ERR_PTR(ret);
-
- hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
- if (!hr_qp)
- return ERR_PTR(-ENOMEM);
-
- if (init_attr->qp_type == IB_QPT_XRC_INI)
- init_attr->recv_cq = NULL;
+ return ret;
- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+ if (init_attr->qp_type == IB_QPT_XRC_TGT)
hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
- init_attr->recv_cq = NULL;
- init_attr->send_cq = NULL;
- }
if (init_attr->qp_type == IB_QPT_GSI) {
hr_qp->port = init_attr->port_num - 1;
@@ -1186,15 +1199,11 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
}
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
- if (ret) {
+ if (ret)
ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
- kfree(hr_qp);
- return ERR_PTR(ret);
- }
-
- return &hr_qp->ibqp;
+ return ret;
}
int to_hr_qp_type(int qp_type)
@@ -1321,17 +1330,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (ret)
goto out;
- if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- ret = -EPERM;
- ibdev_err(&hr_dev->ib_dev,
- "RST2RST state is not supported\n");
- } else {
- ret = 0;
- }
-
+ if (cur_state == new_state && cur_state == IB_QPS_RESET)
goto out;
- }
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state);
@@ -1429,12 +1429,17 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
return cur + nreq >= hr_wq->wqe_cnt;
}
-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned int reserved_from_bot;
unsigned int i;
+ qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
+ sizeof(u32), GFP_KERNEL);
+ if (!qp_table->idx_table.spare_idx)
+ return -ENOMEM;
+
mutex_init(&qp_table->scc_mutex);
mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
@@ -1452,6 +1457,8 @@ void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
HNS_ROCE_QP_BANK_NUM - 1;
hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
}
+
+ return 0;
}
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
@@ -1460,4 +1467,5 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
+ kfree(hr_dev->qp_table.idx_table.spare_idx);
}
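
Splitting alloc_qp_db() into alloc_user_qp_db() and alloc_kernel_qp_db() also straightens out the unwinding: each helper releases exactly what it set up, so the caller loses the udata-guarded err_sdb label. The doorbell capability report moves with it — rather than mirroring each HNS_ROCE_QP_CAP_* bit into resp->cap_flags as it is set, the create path now copies them once (resp.cap_flags = hr_qp->en_flags) just before ib_copy_to_udata(). The unwind shape inside the user helper, reduced to a skeleton (map_sdb/map_rdb are stand-ins for the conditional doorbell mappings):

    ret = map_sdb(...);
    if (ret)
            goto err_out;       /* nothing mapped yet */

    ret = map_rdb(...);
    if (ret)
            goto err_sdb;       /* undo only the earlier mapping */

    return 0;

    err_sdb:
            unmap_sdb(...);
    err_out:
            return ret;
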
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 6f2992f443fa..6eee9deadd12 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -80,15 +80,19 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
+ int id;
- ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc SRQ number.\n");
+ id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ibdev, "failed to alloc srq(%d).\n", id);
return -ENOMEM;
}
+ srq->srqn = (unsigned long)id;
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
if (ret) {
@@ -132,7 +136,7 @@ err_xa:
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
err_out:
- hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
+ ida_free(&srq_ida->ida, id);
return ret;
}
@@ -154,7 +158,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
wait_for_completion(&srq->free);
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
- hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
+ ida_free(&srq_table->srq_ida.ida, (int)srq->srqn);
}
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
@@ -440,18 +444,14 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
return 0;
}
-int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_ida *srq_ida = &srq_table->srq_ida;
xa_init(&srq_table->xa);
- return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
- hr_dev->caps.num_srqs - 1,
- hr_dev->caps.reserved_srqs, 0);
-}
-
-void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
-{
- hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
+ ida_init(&srq_ida->ida);
+ srq_ida->max = hr_dev->caps.num_srqs - 1;
+ srq_ida->min = hr_dev->caps.reserved_srqs;
}
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index e3f5173706fe..78f598fdbccf 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -71,8 +71,6 @@ void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
struct irdma_sc_qp *qp);
void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
-u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
-void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
/* terminate functions*/
void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 5bbe44e54f9a..e94470991fe0 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -1141,10 +1141,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
iwqp->kqp.dma_mem.va = NULL;
kfree(iwqp->kqp.sq_wrid_mem);
- iwqp->kqp.sq_wrid_mem = NULL;
kfree(iwqp->kqp.rq_wrid_mem);
- iwqp->kqp.rq_wrid_mem = NULL;
- kfree(iwqp);
}
/**
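
In irdma_free_qp_rsrc() the kfree(iwqp) disappears for the same reason as in hns: with INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp) registered (see the verbs.c hunk below), the ib core owns the QP container and frees it after the driver callback returns. Zeroing the wrid pointers immediately before the object is freed is unnecessary, so those stores go too:

    kfree(iwqp->kqp.sq_wrid_mem);
    kfree(iwqp->kqp.rq_wrid_mem);
    /* no kfree(iwqp): the core frees the ib_qp container */
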
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 717147ed0519..4fc323402073 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -790,18 +790,19 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
/**
* irdma_create_qp - create qp
- * @ibpd: ptr of pd
+ * @ibqp: ptr of qp
* @init_attr: attributes for qp
* @udata: user data for create qp
*/
-static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int irdma_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
- struct irdma_qp *iwqp;
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
@@ -818,7 +819,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
- return ERR_PTR(err_code);
+ return err_code;
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
@@ -831,10 +832,6 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
- iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
- if (!iwqp)
- return ERR_PTR(-ENOMEM);
-
qp = &iwqp->sc_qp;
qp->qp_uk.back_qp = iwqp;
qp->qp_uk.lock = &iwqp->lock;
@@ -847,10 +844,8 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
iwqp->q2_ctx_mem.size,
&iwqp->q2_ctx_mem.pa,
GFP_KERNEL);
- if (!iwqp->q2_ctx_mem.va) {
- err_code = -ENOMEM;
- goto error;
- }
+ if (!iwqp->q2_ctx_mem.va)
+ return -ENOMEM;
init_info.q2 = iwqp->q2_ctx_mem.va;
init_info.q2_pa = iwqp->q2_ctx_mem.pa;
@@ -999,17 +994,16 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
irdma_destroy_qp(&iwqp->ibqp, udata);
- return ERR_PTR(err_code);
+ return err_code;
}
}
init_completion(&iwqp->free_qp);
- return &iwqp->ibqp;
+ return 0;
error:
irdma_free_qp_rsrc(iwqp);
-
- return ERR_PTR(err_code);
+ return err_code;
}
static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
@@ -2235,7 +2229,7 @@ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
- iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+ iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
*pbl = rdma_block_iter_dma_address(&biter);
@@ -4404,6 +4398,7 @@ static const struct ib_device_ops irdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
};
/**
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index d41f03ccb0e1..9bbd695a9fd5 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -75,7 +75,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae4c91b612ce..f367f4a4abff 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2577,6 +2577,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e856cf23a0a1..c60f6e9ac640 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -792,9 +792,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp);
void mlx4_ib_drain_rq(struct ib_qp *qp);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 50becc0e4b62..04a67b481608 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -200,7 +200,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
mtt_shift = mtt->page_shift;
mtt_size = 1ULL << mtt_shift;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
if (cur_start_addr + len == sg_dma_address(sg)) {
/* still the same block */
len += sg_dma_len(sg);
@@ -273,7 +273,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/*
* Initialization - save the first chunk start as the
* current_block_start - block means contiguous pages.
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4a2ef7daaded..8662f462e2a5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1578,24 +1578,19 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
return 0;
}
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata) {
- struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
+int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *device = ibqp->device;
struct mlx4_ib_dev *dev = to_mdev(device);
- struct mlx4_ib_qp *qp;
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct ib_pd *pd = ibqp->pd;
int ret;
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
-
mutex_init(&qp->mutex);
ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
- if (ret) {
- kfree(qp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ return ret;
if (init_attr->qp_type == IB_QPT_GSI &&
!(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
@@ -1618,7 +1613,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
}
}
- return &qp->ibqp;
+ return 0;
}
static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -1646,8 +1641,6 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
}
kfree(mqp->sqp);
- kfree(mqp);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 9ca2e61807ec..6398e2f48579 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 7fcad9135276..3ad8f637c589 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -116,8 +116,6 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
goto err_free_tx;
}
- mutex_lock(&dev->devr.mutex);
-
if (dev->devr.ports[port_num - 1].gsi) {
mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
port_num);
@@ -147,35 +145,20 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
hw_init_attr.cap.max_inline_data = 0;
}
- gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
+ gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
if (IS_ERR(gsi->rx_qp)) {
mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
PTR_ERR(gsi->rx_qp));
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
- gsi->rx_qp->device = pd->device;
- gsi->rx_qp->pd = pd;
- gsi->rx_qp->real_qp = gsi->rx_qp;
-
- gsi->rx_qp->qp_type = hw_init_attr.qp_type;
- gsi->rx_qp->send_cq = hw_init_attr.send_cq;
- gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
- gsi->rx_qp->event_handler = hw_init_attr.event_handler;
- spin_lock_init(&gsi->rx_qp->mr_lock);
- INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
- INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
dev->devr.ports[attr->port_num - 1].gsi = gsi;
-
- mutex_unlock(&dev->devr.mutex);
-
return 0;
err_destroy_cq:
ib_free_cq(gsi->cq);
err_free_wrs:
- mutex_unlock(&dev->devr.mutex);
kfree(gsi->outstanding_wrs);
err_free_tx:
kfree(gsi->tx_qps);
@@ -190,16 +173,13 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
int qp_index;
int ret;
- mutex_lock(&dev->devr.mutex);
- ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
+ ret = ib_destroy_qp(gsi->rx_qp);
if (ret) {
mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
ret);
- mutex_unlock(&dev->devr.mutex);
return ret;
}
dev->devr.ports[port_num - 1].gsi = NULL;
- mutex_unlock(&dev->devr.mutex);
gsi->rx_qp = NULL;
for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
@@ -213,8 +193,6 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
kfree(gsi->outstanding_wrs);
kfree(gsi->tx_qps);
- kfree(mqp);
-
return 0;
}
@@ -339,23 +317,13 @@ err_destroy_qp:
WARN_ON_ONCE(qp);
}
-static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
-{
- struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
- u16 qp_index;
-
- mutex_lock(&dev->devr.mutex);
- for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
- setup_qp(gsi, qp_index);
- mutex_unlock(&dev->devr.mutex);
-}
-
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ u16 qp_index;
int ret;
mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
@@ -366,8 +334,11 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
return ret;
}
- if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
- setup_qps(gsi);
+ if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS)
+ return 0;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
return 0;
}
@@ -511,8 +482,8 @@ int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
{
- if (!gsi)
- return;
+ u16 qp_index;
- setup_qps(gsi);
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
}
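Usage note on the gsi.c hunk: the block of manual rx_qp field assignments could be dropped because ib_create_qp() goes through the core, which initializes the generic ib_qp fields (device, pd, real_qp, CQ pointers, MR lists) itself. A minimal sketch of a kernel caller, with illustrative capacity values:

#include <rdma/ib_verbs.h>
#include <linux/err.h>

static struct ib_qp *make_ud_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.qp_type = IB_QPT_UD,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	/* No hand-rolled qp->device/qp->pd/... setup needed afterwards. */
	return ib_create_qp(pd, &attr);
}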
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 466f0a521940..8664bcf6d3f5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1184,6 +1184,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
+ if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.dci_streams_caps);
+
+ resp.dci_streams_caps.max_log_num_concurent =
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+
+ resp.dci_streams_caps.max_log_num_errored =
+ MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+ }
+
if (uhw_outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -2501,6 +2511,13 @@ static void pkey_change_handler(struct work_struct *work)
container_of(work, struct mlx5_ib_port_resources,
pkey_change_work);
+ if (!ports->gsi)
+ /*
+ * We got this event before the device was fully configured
+ * and the MAD registration code had not run/finished yet.
+ */
+ return;
+
mlx5_ib_gsi_pkey_change(ports->gsi);
}
@@ -2795,33 +2812,16 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return -EOPNOTSUPP;
- mutex_init(&devr->mutex);
-
- devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!devr->p0)
- return -ENOMEM;
-
- devr->p0->device = ibdev;
- devr->p0->uobject = NULL;
- atomic_set(&devr->p0->usecnt, 0);
-
- ret = mlx5_ib_alloc_pd(devr->p0, NULL);
- if (ret)
- goto error0;
+ devr->p0 = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(devr->p0))
+ return PTR_ERR(devr->p0);
- devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
- if (!devr->c0) {
- ret = -ENOMEM;
+ devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(devr->c0)) {
+ ret = PTR_ERR(devr->c0);
goto error1;
}
- devr->c0->device = &dev->ib_dev;
- atomic_set(&devr->c0->usecnt, 0);
-
- ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
- if (ret)
- goto err_create_cq;
-
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
if (ret)
goto error2;
@@ -2836,45 +2836,22 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
attr.srq_type = IB_SRQT_XRC;
attr.ext.cq = devr->c0;
- devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
- if (!devr->s0) {
- ret = -ENOMEM;
- goto error4;
- }
-
- devr->s0->device = &dev->ib_dev;
- devr->s0->pd = devr->p0;
- devr->s0->srq_type = IB_SRQT_XRC;
- devr->s0->ext.cq = devr->c0;
- ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
- if (ret)
+ devr->s0 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(devr->s0)) {
+ ret = PTR_ERR(devr->s0);
goto err_create;
-
- atomic_inc(&devr->s0->ext.cq->usecnt);
- atomic_inc(&devr->p0->usecnt);
- atomic_set(&devr->s0->usecnt, 0);
+ }
memset(&attr, 0, sizeof(attr));
attr.attr.max_sge = 1;
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC;
- devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
- if (!devr->s1) {
- ret = -ENOMEM;
- goto error5;
- }
-
- devr->s1->device = &dev->ib_dev;
- devr->s1->pd = devr->p0;
- devr->s1->srq_type = IB_SRQT_BASIC;
- devr->s1->ext.cq = devr->c0;
- ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
- if (ret)
+ devr->s1 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(devr->s1)) {
+ ret = PTR_ERR(devr->s1);
goto error6;
-
- atomic_inc(&devr->p0->usecnt);
- atomic_set(&devr->s1->usecnt, 0);
+ }
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
INIT_WORK(&devr->ports[port].pkey_change_work,
@@ -2883,23 +2860,15 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
return 0;
error6:
- kfree(devr->s1);
-error5:
- mlx5_ib_destroy_srq(devr->s0, NULL);
+ ib_destroy_srq(devr->s0);
err_create:
- kfree(devr->s0);
-error4:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
error3:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
error2:
- mlx5_ib_destroy_cq(devr->c0, NULL);
-err_create_cq:
- kfree(devr->c0);
+ ib_destroy_cq(devr->c0);
error1:
- mlx5_ib_dealloc_pd(devr->p0, NULL);
-error0:
- kfree(devr->p0);
+ ib_dealloc_pd(devr->p0);
return ret;
}
@@ -2908,20 +2877,21 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_ib_resources *devr = &dev->devr;
int port;
- mlx5_ib_destroy_srq(devr->s1, NULL);
- kfree(devr->s1);
- mlx5_ib_destroy_srq(devr->s0, NULL);
- kfree(devr->s0);
- mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
- mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
- mlx5_ib_destroy_cq(devr->c0, NULL);
- kfree(devr->c0);
- mlx5_ib_dealloc_pd(devr->p0, NULL);
- kfree(devr->p0);
-
- /* Make sure no change P_Key work items are still executing */
+ /*
+ * Make sure no P_Key change work items are still executing.
+ *
+ * At this stage the mlx5_ib event handler has already been
+ * unregistered, which ensures that no new work items can be queued.
+ */
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
cancel_work_sync(&devr->ports[port].pkey_change_work);
+
+ ib_destroy_srq(devr->s1);
+ ib_destroy_srq(devr->s0);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+ ib_destroy_cq(devr->c0);
+ ib_dealloc_pd(devr->p0);
}
static u32 get_core_cap_flags(struct ib_device *ibdev,
@@ -3799,6 +3769,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};
@@ -4061,7 +4032,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
if (dev->umrc.qp)
- mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
+ ib_destroy_qp(dev->umrc.qp);
if (dev->umrc.cq)
ib_free_cq(dev->umrc.cq);
if (dev->umrc.pd)
@@ -4114,23 +4085,17 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
init_attr->cap.max_send_sge = 1;
init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
init_attr->port_num = 1;
- qp = mlx5_ib_create_qp(pd, init_attr, NULL);
+ qp = ib_create_qp(pd, init_attr);
if (IS_ERR(qp)) {
mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
ret = PTR_ERR(qp);
goto error_3;
}
- qp->device = &dev->ib_dev;
- qp->real_qp = qp;
- qp->uobject = NULL;
- qp->qp_type = MLX5_IB_QPT_REG_UMR;
- qp->send_cq = init_attr->send_cq;
- qp->recv_cq = init_attr->recv_cq;
attr->qp_state = IB_QPS_INIT;
attr->port_num = 1;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
- IB_QP_PORT, NULL);
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
if (ret) {
mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
goto error_4;
@@ -4140,7 +4105,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
attr->qp_state = IB_QPS_RTR;
attr->path_mtu = IB_MTU_256;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+ ret = ib_modify_qp(qp, attr, IB_QP_STATE);
if (ret) {
mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
goto error_4;
@@ -4148,7 +4113,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
memset(attr, 0, sizeof(*attr));
attr->qp_state = IB_QPS_RTS;
- ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+ ret = ib_modify_qp(qp, attr, IB_QP_STATE);
if (ret) {
mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
goto error_4;
@@ -4171,7 +4136,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
return 0;
error_4:
- mlx5_ib_destroy_qp(qp, NULL);
+ ib_destroy_qp(qp);
dev->umrc.qp = NULL;
error_3:
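mlx5_ib_dev_res_init() now builds its reserved objects through the generic verbs entry points, so each step returns ERR_PTR() and the unwind path uses the matching ib_*destroy calls instead of driver internals plus kfree(). A minimal sketch of that shape (two objects only, hypothetical helper name):

#include <rdma/ib_verbs.h>
#include <linux/err.h>

static int setup_dev_resources(struct ib_device *ibdev,
			       struct ib_pd **pd, struct ib_cq **cq)
{
	struct ib_cq_init_attr cq_attr = { .cqe = 1 };
	int ret;

	*pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(*pd))
		return PTR_ERR(*pd);

	*cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(*cq)) {
		ret = PTR_ERR(*cq);
		goto err_pd;
	}
	return 0;

err_pd:
	ib_dealloc_pd(*pd);
	return ret;
}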
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 585fb00bdce8..bf20a388eabe 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -795,8 +795,6 @@ struct mlx5_ib_resources {
struct ib_srq *s0;
struct ib_srq *s1;
struct mlx5_ib_port_resources ports[2];
- /* Protects changes to the port resources */
- struct mutex mutex;
};
struct mlx5_ib_counters {
@@ -1221,9 +1219,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3f1c5a4f158b..3be36ebbf67a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -995,7 +995,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
const size_t xlt_chunk_align =
- MLX5_UMR_MTT_ALIGNMENT / sizeof(ent_size);
+ MLX5_UMR_MTT_ALIGNMENT / ent_size;
size_t size;
void *res = NULL;
@@ -1024,7 +1024,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
if (size > MLX5_SPARE_UMR_CHUNK) {
size = MLX5_SPARE_UMR_CHUNK;
- *nents = get_order(size) / ent_size;
+ *nents = size / ent_size;
res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
get_order(size));
if (res)
@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
orig_sg_length = sg.length;
cur_mtt = mtt;
- rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+ rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+ mr->umem->sgt_append.sgt.nents,
BIT(mr->page_shift)) {
if (cur_mtt == (void *)mtt + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
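rdma_for_each_block() splits the mapped scatterlist into device-page-sized blocks; the hunk above only changes where the sgl and nents come from (the sgt_append table). A minimal sketch of the iterator, with an illustrative debug helper:

#include <rdma/ib_verbs.h>
#include <linux/printk.h>

static void dump_mr_blocks(struct sg_table *sgt, unsigned int page_shift)
{
	struct ib_block_iter biter;
	dma_addr_t addr;

	/* One iteration per 2^page_shift-sized DMA block. */
	rdma_for_each_block(sgt->sgl, &biter, sgt->nents, BIT(page_shift)) {
		addr = rdma_block_iter_dma_address(&biter);
		pr_debug("block at %pad\n", &addr);
	}
}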
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a77db29f8391..b2fca110346c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1906,7 +1906,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_create_qp_params *params)
{
- struct mlx5_ib_create_qp *ucmd = params->ucmd;
struct ib_qp_init_attr *attr = params->attr;
u32 uidx = params->uidx;
struct mlx5_ib_resources *devr = &dev->devr;
@@ -1926,8 +1925,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
- if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
- MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
@@ -1982,6 +1979,167 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return 0;
}
+static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int ts_format;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(init_attr);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
+
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+
+ if (ts_format < 0)
+ return ts_format;
+
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ MLX5_SET(qpc, qpc, wq_signature, 1);
+
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
+ MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
+ ucmd->dci_streams.log_num_concurent);
+ MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
+ ucmd->dci_streams.log_num_errored);
+ }
+
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+
+ /* Set default resources */
+ if (init_attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(init_attr->srq)->msrq.srqn);
+ } else {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
+ }
+
+ if (init_attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd,
+ to_mcq(init_attr->send_cq)->mcq.cqn);
+
+ if (init_attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv,
+ to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case: clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+ }
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device-to-QP access; needed for later handling via the
+ * reset flow.
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ-to-QP access; needed for later handling via the reset
+ * flow.
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp,
struct mlx5_create_qp_params *params)
@@ -2512,7 +2670,6 @@ static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
qp->state = IB_QPS_RESET;
- rdma_restrack_no_track(&qp->ibqp.res);
return 0;
}
@@ -2653,6 +2810,9 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
+ qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
@@ -2847,6 +3007,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
switch (qp->type) {
case MLX5_IB_QPT_DCT:
err = create_dct(dev, pd, qp, params);
+ rdma_restrack_no_track(&qp->ibqp.res);
+ break;
+ case MLX5_IB_QPT_DCI:
+ err = create_dci(dev, pd, qp, params);
break;
case IB_QPT_XRC_TGT:
err = create_xrc_tgt_qp(dev, qp, params);
@@ -2854,6 +3018,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
case IB_QPT_GSI:
err = mlx5_ib_create_gsi(pd, qp, params->attr);
break;
+ case MLX5_IB_QPT_HW_GSI:
+ case MLX5_IB_QPT_REG_UMR:
+ rdma_restrack_no_track(&qp->ibqp.res);
+ fallthrough;
default:
if (params->udata)
err = create_user_qp(dev, pd, qp, params);
@@ -2942,7 +3110,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
}
kfree(mqp->dct.in);
- kfree(mqp);
return 0;
}
@@ -2980,25 +3147,23 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
return ret ? 0 : -EINVAL;
}
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
- struct ib_udata *udata)
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
{
struct mlx5_create_qp_params params = {};
- struct mlx5_ib_dev *dev;
- struct mlx5_ib_qp *qp;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct ib_pd *pd = ibqp->pd;
enum ib_qp_type type;
int err;
- dev = pd ? to_mdev(pd->device) :
- to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
-
err = check_qp_type(dev, attr, &type);
if (err)
- return ERR_PTR(err);
+ return err;
err = check_valid_flow(dev, pd, attr, udata);
if (err)
- return ERR_PTR(err);
+ return err;
params.udata = udata;
params.uidx = MLX5_IB_DEFAULT_UIDX;
@@ -3008,49 +3173,43 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
if (udata) {
err = process_udata_size(dev, &params);
if (err)
- return ERR_PTR(err);
+ return err;
err = check_ucmd_data(dev, &params);
if (err)
- return ERR_PTR(err);
+ return err;
params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
if (!params.ucmd)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
if (err)
goto free_ucmd;
}
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- err = -ENOMEM;
- goto free_ucmd;
- }
-
mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
if (err)
- goto free_qp;
+ goto free_ucmd;
err = get_qp_uidx(qp, &params);
if (err)
- goto free_qp;
+ goto free_ucmd;
}
err = process_create_flags(dev, qp, attr);
if (err)
- goto free_qp;
+ goto free_ucmd;
err = check_qp_attr(dev, qp, attr);
if (err)
- goto free_qp;
+ goto free_ucmd;
err = create_qp(dev, pd, qp, &params);
if (err)
- goto free_qp;
+ goto free_ucmd;
kfree(params.ucmd);
params.ucmd = NULL;
@@ -3065,7 +3224,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
if (err)
goto destroy_qp;
- return &qp->ibqp;
+ return 0;
destroy_qp:
switch (qp->type) {
@@ -3076,22 +3235,12 @@ destroy_qp:
mlx5_ib_destroy_gsi(qp);
break;
default:
- /*
- * These lines below are temp solution till QP allocation
- * will be moved to be under IB/core responsiblity.
- */
- qp->ibqp.send_cq = attr->send_cq;
- qp->ibqp.recv_cq = attr->recv_cq;
- qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
}
- qp = NULL;
-free_qp:
- kfree(qp);
free_ucmd:
kfree(params.ucmd);
- return ERR_PTR(err);
+ return err;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -3106,9 +3255,6 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp, udata);
-
- kfree(mqp);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 2cdf686203c1..97287c544da8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -617,9 +617,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
mthca_free_mr(dev, &eq->mr);
for (i = 0; i < npages; ++i)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- eq->page_list[i].buf,
- dma_unmap_addr(&eq->page_list[i], mapping));
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
@@ -739,17 +739,18 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
if (!dev->eq_table.icm_page)
return -ENOMEM;
- dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
+ dev->eq_table.icm_dma =
+ dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) {
__free_page(dev->eq_table.icm_page);
return -ENOMEM;
}
ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
if (ret) {
- pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
@@ -759,8 +760,8 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
- pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
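The mthca conversions in this and the following files replace the long-deprecated pci_* DMA wrappers with the generic DMA API on &pdev->dev. For the coherent allocations above, the equivalent pairing looks like this (sketch, illustrative names):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *alloc_eq_page(struct pci_dev *pdev, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, PAGE_SIZE, dma) */
	return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
}

static void free_eq_page(struct pci_dev *pdev, void *buf, dma_addr_t dma)
{
	/* was: pci_free_consistent(pdev, PAGE_SIZE, buf, dma) */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma);
}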
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fe9654a7af71..f507c4cd46d3 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -937,26 +937,15 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
goto err_free_res;
}
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
- goto err_free_res;
- }
- }
/* We can handle large RDMA requests, so allow larger segments. */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
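dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, which is what lets the separate pci_set_consistent_dma_mask() fallback block above be deleted. The resulting idiom (sketch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)	/* fall back to 32-bit addressing */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}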
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index fa808582b08b..f2734a5c5f26 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -66,8 +66,8 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
+ DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
__free_pages(sg_page(&chunk->mem[i]),
@@ -184,9 +184,10 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg =
+ dma_map_sg(&dev->pdev->dev, chunk->mem,
+ chunk->npages,
+ DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -204,9 +205,8 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
+ chunk->npages, DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -480,7 +480,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
uaddr & ~PAGE_MASK);
- ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+ ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
if (ret < 0) {
unpin_user_page(pages[0]);
goto out;
@@ -489,7 +490,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
mthca_uarc_virt(dev, uar, i));
if (ret) {
- pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
unpin_user_page(sg_page(&db_tab->page[i].mem));
goto out;
}
@@ -555,7 +557,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
- pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+ DMA_TO_DEVICE);
unpin_user_page(sg_page(&db_tab->page[i].mem));
}
}
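One behavioural detail worth keeping in mind with the dma_map_sg() conversion: it returns 0 on failure (never a negative errno), and dma_unmap_sg() must be passed the original entry count, not the mapped count. A minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_chunk(struct device *dev, struct scatterlist *sgl, int npages)
{
	int nsg = dma_map_sg(dev, sgl, npages, DMA_BIDIRECTIONAL);

	if (!nsg)		/* 0 means failure, as checked above */
		return -ENOMEM;
	return nsg;		/* may be < npages after merging */
}

static void unmap_chunk(struct device *dev, struct scatterlist *sgl, int npages)
{
	/* Unmap with the original npages, not the returned nsg. */
	dma_unmap_sg(dev, sgl, npages, DMA_BIDIRECTIONAL);
}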
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index adf4fcf0fee4..ceee23ebc0f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -459,52 +459,45 @@ static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
return 0;
}
-static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int mthca_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_ucontext *context = rdma_udata_to_drv_context(
udata, struct mthca_ucontext, ibucontext);
struct mthca_create_qp ucmd;
- struct mthca_qp *qp;
+ struct mthca_qp *qp = to_mqp(ibqp);
+ struct mthca_dev *dev = to_mdev(ibqp->device);
int err;
if (init_attr->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
switch (init_attr->qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
{
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
-
if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
- kfree(qp);
- return ERR_PTR(-EFAULT);
- }
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(dev, &context->uar,
context->db_tab,
- ucmd.sq_db_index, ucmd.sq_db_page);
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
+ ucmd.sq_db_index,
+ ucmd.sq_db_page);
+ if (err)
+ return err;
- err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ err = mthca_map_user_db(dev, &context->uar,
context->db_tab,
- ucmd.rq_db_index, ucmd.rq_db_page);
+ ucmd.rq_db_index,
+ ucmd.rq_db_page);
if (err) {
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
+ mthca_unmap_user_db(dev, &context->uar,
context->db_tab,
ucmd.sq_db_index);
- kfree(qp);
- return ERR_PTR(err);
+ return err;
}
qp->mr.ibmr.lkey = ucmd.lkey;
@@ -512,20 +505,16 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
qp->rq.db_index = ucmd.rq_db_index;
}
- err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
+ err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->qp_type, init_attr->sq_sig_type,
&init_attr->cap, qp, udata);
if (err && udata) {
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
- context->db_tab,
+ mthca_unmap_user_db(dev, &context->uar, context->db_tab,
ucmd.sq_db_index);
- mthca_unmap_user_db(to_mdev(pd->device),
- &context->uar,
- context->db_tab,
+ mthca_unmap_user_db(dev, &context->uar, context->db_tab,
ucmd.rq_db_index);
}
@@ -535,34 +524,28 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case IB_QPT_GSI:
{
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
- if (!qp->sqp) {
- kfree(qp);
- return ERR_PTR(-ENOMEM);
- }
+ if (!qp->sqp)
+ return -ENOMEM;
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
- err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
+ err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->sq_sig_type, &init_attr->cap,
- qp->ibqp.qp_num, init_attr->port_num,
- qp, udata);
+ qp->ibqp.qp_num, init_attr->port_num, qp,
+ udata);
break;
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (err) {
kfree(qp->sqp);
- kfree(qp);
- return ERR_PTR(err);
+ return err;
}
init_attr->cap.max_send_wr = qp->sq.max;
@@ -571,7 +554,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
init_attr->cap.max_recv_sge = qp->rq.max_gs;
init_attr->cap.max_inline_data = qp->max_inline_data;
- return &qp->ibqp;
+ return 0;
}
static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -594,7 +577,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
}
mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
kfree(to_mqp(qp)->sqp);
- kfree(to_mqp(qp));
return 0;
}
@@ -1121,6 +1103,7 @@ static const struct ib_device_ops mthca_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index f329db0c591f..7abf6cf1e937 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -185,6 +185,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 58619ce64d0d..735123d0e9ec 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1288,19 +1288,19 @@ static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
}
}
-struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
int status;
+ struct ib_pd *ibpd = ibqp->pd;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_qp *qp;
- struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
if (attrs->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
if (status)
@@ -1309,12 +1309,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
memset(&ureq, 0, sizeof(ureq));
if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return ERR_PTR(-EFAULT);
- }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- status = -ENOMEM;
- goto gen_err;
+ return -EFAULT;
}
ocrdma_set_qp_init_params(qp, pd, attrs);
if (udata == NULL)
@@ -1349,7 +1344,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
ocrdma_store_gsi_qp_cq(dev, attrs);
qp->ibqp.qp_num = qp->id;
mutex_unlock(&dev->dev_lock);
- return &qp->ibqp;
+ return 0;
cpy_err:
ocrdma_del_qpn_map(dev, qp);
@@ -1359,10 +1354,9 @@ mbx_err:
mutex_unlock(&dev->dev_lock);
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
- kfree(qp);
pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
- return ERR_PTR(status);
+ return status;
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1731,7 +1725,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
- kfree(qp);
return 0;
}
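As in the other drivers converted here, ocrdma_destroy_qp() stops freeing the QP because the core releases the container after the driver hook returns. A sketch of the reduced responsibility (hypothetical driver):

#include <rdma/ib_verbs.h>

static int drv_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	/* Tear down hardware state and driver-private tables only;
	 * the RDMA core frees the ib_qp (and the embedding struct)
	 * once this returns. */
	return 0;
}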
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b1c5fad81603..b73d742a520c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -75,9 +75,8 @@ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
-struct ib_qp *ocrdma_create_qp(struct ib_pd *,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *);
+int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index de98e0604f91..755930be01b8 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -233,6 +233,7 @@ static const struct ib_device_ops qedr_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 13e5e6bbec99..05307c1488b8 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -319,20 +319,19 @@ err1:
return rc;
}
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs,
- struct qedr_qp *qp)
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
{
int rc;
rc = qedr_check_gsi_qp_attrs(dev, attrs);
if (rc)
- return ERR_PTR(rc);
+ return rc;
rc = qedr_ll2_start(dev, attrs, qp);
if (rc) {
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
- return ERR_PTR(rc);
+ return rc;
}
/* create QP */
@@ -359,7 +358,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
- return &qp->ibqp;
+ return 0;
err:
kfree(qp->rqe_wr_id);
@@ -368,7 +367,7 @@ err:
if (rc)
DP_ERR(dev, "create gsi qp: failed destroy on create\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index d46dcd3f6424..f3432f035ec6 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
@@ -50,9 +50,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs,
- struct qedr_qp *qp);
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
int qedr_destroy_gsi_qp(struct qedr_dev *dev);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index fdc47ef7d861..3fbf172dbbef 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1339,6 +1339,15 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
return rc;
}
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct qedr_qp *qp,
struct qedr_pd *pd,
@@ -1354,9 +1363,13 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
qp->state = QED_ROCE_QP_STATE_RESET;
+
+ qp->prev_wqe_size = 0;
+
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
qp->dev = dev;
if (qedr_qp_has_sq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->sq);
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->sq_cq = get_qedr_cq(attrs->send_cq);
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -1368,6 +1381,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->srq = get_qedr_srq(attrs->srq);
if (qedr_qp_has_rq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->rq);
qp->rq_cq = get_qedr_cq(attrs->recv_cq);
qp->rq.max_sges = attrs->cap.max_recv_sge;
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -1481,7 +1495,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
return PTR_ERR(srq->prod_umem);
}
- sg = srq->prod_umem->sg_head.sgl;
+ sg = srq->prod_umem->sgt_append.sgt.sgl;
srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
return 0;
@@ -2239,34 +2253,30 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return 0;
}
-struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
struct qedr_xrcd *xrcd = NULL;
- struct qedr_pd *pd = NULL;
- struct qedr_dev *dev;
- struct qedr_qp *qp;
- struct ib_qp *ibqp;
+ struct ib_pd *ibpd = ibqp->pd;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
int rc = 0;
if (attrs->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
- if (attrs->qp_type == IB_QPT_XRC_TGT) {
+ if (attrs->qp_type == IB_QPT_XRC_TGT)
xrcd = get_qedr_xrcd(attrs->xrcd);
- dev = get_qedr_dev(xrcd->ibxrcd.device);
- } else {
+ else
pd = get_qedr_pd(ibpd);
- dev = get_qedr_dev(ibpd->device);
- }
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
udata ? "user library" : "kernel", pd);
rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
if (rc)
- return ERR_PTR(rc);
+ return rc;
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
@@ -2276,20 +2286,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
get_qedr_cq(attrs->recv_cq),
attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- DP_ERR(dev, "create qp: failed allocating memory\n");
- return ERR_PTR(-ENOMEM);
- }
-
qedr_set_common_qp_params(dev, qp, pd, attrs);
- if (attrs->qp_type == IB_QPT_GSI) {
- ibqp = qedr_create_gsi_qp(dev, attrs, qp);
- if (IS_ERR(ibqp))
- kfree(qp);
- return ibqp;
- }
+ if (attrs->qp_type == IB_QPT_GSI)
+ return qedr_create_gsi_qp(dev, attrs, qp);
if (udata || xrcd)
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
@@ -2297,7 +2297,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
if (rc)
- goto out_free_qp;
+ return rc;
qp->ibqp.qp_num = qp->qp_id;
@@ -2307,14 +2307,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
goto out_free_qp_resources;
}
- return &qp->ibqp;
+ return 0;
out_free_qp_resources:
qedr_free_qp_resources(dev, qp, udata);
-out_free_qp:
- kfree(qp);
-
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
@@ -2359,15 +2356,6 @@ static enum qed_roce_qp_state qedr_get_state_from_ibqp(
}
}
-static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
-{
- qed_chain_reset(&qph->pbl);
- qph->prod = 0;
- qph->cons = 0;
- qph->wqe_cons = 0;
- qph->db_data.data.value = cpu_to_le16(0);
-}
-
static int qedr_update_qp_state(struct qedr_dev *dev,
struct qedr_qp *qp,
enum qed_roce_qp_state cur_state,
@@ -2382,9 +2370,6 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
case QED_ROCE_QP_STATE_RESET:
switch (new_state) {
case QED_ROCE_QP_STATE_INIT:
- qp->prev_wqe_size = 0;
- qedr_reset_qp_hwq_info(&qp->sq);
- qedr_reset_qp_hwq_info(&qp->rq);
break;
default:
status = -EINVAL;
@@ -2874,8 +2859,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (rdma_protocol_iwarp(&dev->ibdev, 1))
qedr_iw_qp_rem_ref(&qp->ibqp);
- else
- kfree(qp);
return 0;
}
@@ -2996,7 +2979,11 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
@@ -3091,7 +3078,11 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err0;
}
@@ -3221,7 +3212,11 @@ struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
@@ -3915,12 +3910,6 @@ int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if (qp->state == QED_ROCE_QP_STATE_RESET) {
- spin_unlock_irqrestore(&qp->q_lock, flags);
- *bad_wr = wr;
- return -EINVAL;
- }
-
while (wr) {
int i;
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 34ad47515861..031687dafc61 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -56,8 +56,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
- struct ib_udata *);
+int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index c60e79d214a1..63854f4b6524 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -429,8 +429,8 @@ cleanup:
dd->f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED,
dd->tidinvalid);
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&dd->pcidev->dev, phys,
+ PAGE_SIZE, DMA_FROM_DEVICE);
dd->pageshadow[ctxttid + tid] = NULL;
}
}
@@ -544,8 +544,8 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
*/
dd->f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+ DMA_FROM_DEVICE);
qib_release_user_pages(&p, 1);
}
}
@@ -1781,8 +1781,8 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
phys = dd->physshadow[i];
dd->physshadow[i] = dd->tidinvalid;
dd->pageshadow[i] = NULL;
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+ DMA_FROM_DEVICE);
qib_release_user_pages(&p, 1);
cnt++;
}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index b5a78576c48b..d1a72e89e297 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1335,8 +1335,8 @@ static void cleanup_device_data(struct qib_devdata *dd)
for (i = ctxt_tidbase; i < maxtid; i++) {
if (!tmpp[i])
continue;
- pci_unmap_page(dd->pcidev, tmpd[i],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&dd->pcidev->dev, tmpd[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
qib_release_user_pages(&tmpp[i], 1);
tmpp[i] = NULL;
}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d57e49de6650..452e2355d24e 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,9 +403,11 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
+ static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
- .counter = &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0, \
+ .counter = \
+ offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \
}
QIB_DIAGC_ATTR(rc_resends);
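The new QIB_DIAGC_ATTR computes the counter's u64 index with offsetof() instead of subtracting two pointers derived from NULL, which was undefined behaviour; the static_assert keeps a compile-time type check on the member. The same idea in isolation (hypothetical struct):

#include <linux/stddef.h>
#include <linux/types.h>

struct counters {		/* stands in for qib_ibport's rvp counters */
	u64 rc_resends;
	u64 rc_acks;
};

/* Index of a u64 member inside the struct, computed without
 * dereferencing or subtracting NULL-based pointers. */
#define COUNTER_INDEX(member) \
	(offsetof(struct counters, member) / sizeof(u64))

static u64 read_counter(const struct counters *c, size_t idx)
{
	return ((const u64 *)c)[idx];	/* how such an index is consumed */
}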
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 5d6cf7427431..f4b5f05058e4 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -60,15 +60,15 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
- phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(hwdev, phys))
+ phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hwdev->dev, phys))
return -ENOMEM;
if (!phys) {
- pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(hwdev, phys))
+ dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
+ phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hwdev->dev, phys))
return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index c49f9e19d926..228e9a36dad0 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -360,6 +360,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.reg_user_mr = usnic_ib_reg_mr,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 0cdb156e165e..3b60fa9cb58d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -665,13 +665,12 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
return 0;
}
-struct usnic_ib_qp_grp *
-usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
- struct usnic_ib_pd *pd,
- struct usnic_vnic_res_spec *res_spec,
- struct usnic_transport_spec *transport_spec)
+int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *transport_spec)
{
- struct usnic_ib_qp_grp *qp_grp;
int err;
enum usnic_transport_type transport = transport_spec->trans_type;
struct usnic_ib_qp_grp_flow *qp_flow;
@@ -684,20 +683,15 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
usnic_err("Spec does not meet minimum req for transport %d\n",
transport);
log_spec(res_spec);
- return ERR_PTR(err);
+ return err;
}
- qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
- if (!qp_grp)
- return NULL;
-
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
qp_grp);
- if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
- err = qp_grp->res_chunk_list ?
- PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
- goto out_free_qp_grp;
- }
+ if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
+ return qp_grp->res_chunk_list ?
+ PTR_ERR(qp_grp->res_chunk_list) :
+ -ENOMEM;
err = qp_grp_and_vf_bind(vf, pd, qp_grp);
if (err)
@@ -724,7 +718,7 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
usnic_ib_sysfs_qpn_add(qp_grp);
- return qp_grp;
+ return 0;
out_release_flow:
release_and_remove_flow(qp_flow);
@@ -732,10 +726,7 @@ out_qp_grp_vf_unbind:
qp_grp_and_vf_unbind(qp_grp);
out_free_res:
free_qp_grp_res(qp_grp->res_chunk_list);
-out_free_qp_grp:
- kfree(qp_grp);
-
- return ERR_PTR(err);
+ return err;
}
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@@ -748,7 +739,6 @@ void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
usnic_ib_sysfs_qpn_remove(qp_grp);
qp_grp_and_vf_unbind(qp_grp);
free_qp_grp_res(qp_grp->res_chunk_list);
- kfree(qp_grp);
}
struct usnic_vnic_res_chunk*
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
index a8a2314c9531..62e732be6736 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -89,11 +89,11 @@ extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
-struct usnic_ib_qp_grp *
-usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
- struct usnic_ib_pd *pd,
- struct usnic_vnic_res_spec *res_spec,
- struct usnic_transport_spec *trans_spec);
+int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp,
+ struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *trans_spec);
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
enum ib_qp_state new_state,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 57d210ca855a..06a4e9d4545d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -168,30 +168,31 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
return 0;
}
-static struct usnic_ib_qp_grp*
-find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
- struct usnic_ib_pd *pd,
- struct usnic_transport_spec *trans_spec,
- struct usnic_vnic_res_spec *res_spec)
+static int
+find_free_vf_and_create_qp_grp(struct ib_qp *qp,
+ struct usnic_transport_spec *trans_spec,
+ struct usnic_vnic_res_spec *res_spec)
{
+ struct usnic_ib_dev *us_ibdev = to_usdev(qp->device);
+ struct usnic_ib_pd *pd = to_upd(qp->pd);
struct usnic_ib_vf *vf;
struct usnic_vnic *vnic;
- struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp);
struct device *dev, **dev_list;
- int i;
+ int i, ret;
BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
if (list_empty(&us_ibdev->vf_dev_list)) {
usnic_info("No vfs to allocate\n");
- return NULL;
+ return -ENOMEM;
}
if (usnic_ib_share_vf) {
/* Try to find resources on a used VF that is in the pd */
dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
if (IS_ERR(dev_list))
- return ERR_CAST(dev_list);
+ return PTR_ERR(dev_list);
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
vf = dev_get_drvdata(dev);
@@ -202,10 +203,10 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev(
vnic)));
- qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
- vf, pd,
- res_spec,
- trans_spec);
+ ret = usnic_ib_qp_grp_create(qp_grp,
+ us_ibdev->ufdev,
+ vf, pd, res_spec,
+ trans_spec);
spin_unlock(&vf->lock);
goto qp_grp_check;
@@ -223,9 +224,9 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
vnic = vf->vnic;
if (vf->qp_grp_ref_cnt == 0 &&
usnic_vnic_check_room(vnic, res_spec) == 0) {
- qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
- pd, res_spec,
- trans_spec);
+ ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev,
+ vf, pd, res_spec,
+ trans_spec);
spin_unlock(&vf->lock);
goto qp_grp_check;
@@ -235,16 +236,15 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
usnic_info("No free qp grp found on %s\n",
dev_name(&us_ibdev->ib_dev.dev));
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
qp_grp_check:
- if (IS_ERR_OR_NULL(qp_grp)) {
+ if (ret) {
usnic_err("Failed to allocate qp_grp\n");
if (usnic_ib_share_vf)
usnic_uiom_free_dev_list(dev_list);
- return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
- return qp_grp;
+ return ret;
}
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@@ -458,13 +458,12 @@ int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0;
}
-struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
int err;
struct usnic_ib_dev *us_ibdev;
- struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp);
struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct usnic_ib_ucontext, ibucontext);
int cq_cnt;
@@ -474,29 +473,29 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
usnic_dbg("\n");
- us_ibdev = to_usdev(pd->device);
+ us_ibdev = to_usdev(ibqp->device);
if (init_attr->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
dev_name(&us_ibdev->ib_dev.dev));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
dev_name(&us_ibdev->ib_dev.dev));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
trans_spec = cmd.spec;
@@ -504,13 +503,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
res_spec = min_transport_spec[trans_spec.trans_type];
usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
- qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
- &trans_spec,
- &res_spec);
- if (IS_ERR_OR_NULL(qp_grp)) {
- err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
+ err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec);
+ if (err)
goto out_release_mutex;
- }
err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
if (err) {
@@ -522,13 +517,13 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
usnic_ib_log_vf(qp_grp->vf);
mutex_unlock(&us_ibdev->usdev_lock);
- return &qp_grp->ibqp;
+ return 0;
out_release_qp_grp:
qp_grp_destroy(qp_grp);
out_release_mutex:
mutex_unlock(&us_ibdev->usdev_lock);
- return ERR_PTR(err);
+ return err;
}
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
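This usnic conversion follows the series-wide pattern: the ib core now allocates the QP container itself, so ->create_qp receives a preallocated struct ib_qp and reports failure as a plain errno rather than an ERR_PTR. A minimal before/after sketch of the idiom (alloc_foo()/init_foo() are generic placeholders, not driver functions):

	/* Before: allocation result and error code are multiplexed in one pointer. */
	struct foo *obj = alloc_foo();
	if (IS_ERR_OR_NULL(obj))
		return ERR_PTR(obj ? PTR_ERR(obj) : -ENOMEM);

	/* After: the object already exists; only an integer errno travels back. */
	int ret = init_foo(obj);
	if (ret)
		return ret;

This is why find_free_vf_and_create_qp_grp() above drops its ERR_CAST()/IS_ERR_OR_NULL() handling in favor of PTR_ERR() and integer returns.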
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 6b82d0f2d184..6ca9ee0dddbe 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -50,9 +50,8 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 8ed8bc24c69f..105f3a155939 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -185,6 +185,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};
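The new INIT_RDMA_OBJ_SIZE(ib_qp, ...) entry is what lets ib_core size and allocate the driver QP before calling ->create_qp. Roughly, assuming the struct layout implied by the to_vqp() uses later in this patch:

	/* Driver object embeds the core-visible part at a recorded offset. */
	struct pvrdma_qp {
		struct ib_qp ibqp;
		/* ... pvrdma-private queues, locks, umems ... */
	};

	/* Records sizeof(struct pvrdma_qp) and offsetof(..., ibqp) in the
	 * ops table, so the core can kzalloc the whole object up front.
	 */
	INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),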
@@ -810,18 +811,10 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Enable 64-Bit DMA */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret != 0) {
- dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed\n");
- goto err_free_resource;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret != 0) {
- dev_err(&pdev->dev,
- "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_free_resource;
}
}
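dma_set_mask_and_coherent() sets the streaming and coherent masks in one call and returns 0 on success, which is why the 64-bit branch above no longer needs a separate pci_set_consistent_dma_mask() step; only the 32-bit fallback remains. The resulting idiom, independent of pvrdma:

	/* Prefer full 64-bit DMA; otherwise fall back to a 32-bit streaming mask. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_free_resource;	/* no usable DMA configuration */
	}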
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 67769b715126..f83cd4a9d992 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -182,18 +182,17 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
/**
* pvrdma_create_qp - create queue pair
- * @pd: protection domain
+ * @ibqp: queue pair
* @init_attr: queue pair attributes
* @udata: user data
*
- * @return: the ib_qp pointer on success, otherwise returns an errno.
+ * @return: 0 on success, otherwise returns an errno.
*/
-struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct pvrdma_qp *qp = NULL;
- struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
union pvrdma_cmd_req req;
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
@@ -209,7 +208,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev,
"invalid create queuepair flags %#x\n",
init_attr->create_flags);
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->qp_type != IB_QPT_RC &&
@@ -217,22 +216,22 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_attr->qp_type != IB_QPT_GSI) {
dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
init_attr->qp_type);
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (is_srq && !dev->dsr->caps.max_srq) {
dev_warn(&dev->pdev->dev,
"SRQs not supported by device\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
switch (init_attr->qp_type) {
case IB_QPT_GSI:
if (init_attr->port_num == 0 ||
- init_attr->port_num > pd->device->phys_port_cnt) {
+ init_attr->port_num > ibqp->device->phys_port_cnt) {
dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
ret = -EINVAL;
goto err_qp;
@@ -240,12 +239,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
fallthrough;
case IB_QPT_RC:
case IB_QPT_UD:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- ret = -ENOMEM;
- goto err_qp;
- }
-
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
@@ -275,9 +268,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem =
- ib_umem_get(pd->device, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0);
+ qp->rumem = ib_umem_get(ibqp->device,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -288,7 +281,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
+ qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
@@ -306,12 +299,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
- ret = pvrdma_set_sq_size(to_vdev(pd->device),
+ ret = pvrdma_set_sq_size(to_vdev(ibqp->device),
&init_attr->cap, qp);
if (ret)
goto err_qp;
- ret = pvrdma_set_rq_size(to_vdev(pd->device),
+ ret = pvrdma_set_rq_size(to_vdev(ibqp->device),
&init_attr->cap, qp);
if (ret)
goto err_qp;
@@ -362,7 +355,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
- cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->pd_handle = to_vpd(ibqp->pd)->pd_handle;
cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
if (is_srq)
@@ -418,11 +411,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
__pvrdma_destroy_qp(dev, qp);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
- return &qp->ibqp;
+ return 0;
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
@@ -430,10 +423,8 @@ err_umem:
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
err_qp:
- kfree(qp);
atomic_dec(&dev->num_qps);
-
- return ERR_PTR(ret);
+ return ret;
}
static void _pvrdma_free_qp(struct pvrdma_qp *qp)
@@ -454,8 +445,6 @@ static void _pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_page_dir_cleanup(dev, &qp->pdir);
- kfree(qp);
-
atomic_dec(&dev->num_qps);
}
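With the kzalloc()/kfree() pair deleted from pvrdma_create_qp() and _pvrdma_free_qp(), QP memory is owned by ib_core for the object's whole life. A simplified sketch of the core side (not the verbatim ib_core code; the size field comes from DECLARE_RDMA_OBJ_SIZE):

	/* Core allocates using the size the driver registered ... */
	qp = kzalloc(dev->ops.size_ib_qp, GFP_KERNEL);
	ret = dev->ops.create_qp(qp, init_attr, udata);	/* driver initializes */
	if (ret)
		kfree(qp);
	/* ... and frees it again after ->destroy_qp() tears down driver state. */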
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 544b94d97c3a..78807b23d831 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -390,9 +390,8 @@ int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
-struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int pvrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index a3e5b368c5e7..63999239ed9e 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 5a85edd06491..c11fdf637d64 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTAH_H
-#define DEF_RVTAH_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTAH_H
+#define DEF_RVTAH_H
+
#include <rdma/rdma_vt.h>
int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 5138afca067f..9fe4dcaa049a 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index feb01e7ee004..b0a948ec760b 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTCQ_H
-#define DEF_RVTCQ_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTCQ_H
+#define DEF_RVTCQ_H
+
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_cq.h>
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index 207bc0ed96ff..98a8fe3b04ef 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
index 1eae5efea4be..368be29eab37 100644
--- a/drivers/infiniband/sw/rdmavt/mad.h
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTMAD_H
-#define DEF_RVTMAD_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTMAD_H
+#define DEF_RVTMAD_H
+
#include <rdma/rdma_vt.h>
int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 951abac13dbb..a123874e1ca7 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/infiniband/sw/rdmavt/mcast.h b/drivers/infiniband/sw/rdmavt/mcast.h
index 29f579267608..b96d86f9625b 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.h
+++ b/drivers/infiniband/sw/rdmavt/mcast.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTMCAST_H
-#define DEF_RVTMCAST_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTMCAST_H
+#define DEF_RVTMCAST_H
+
#include <rdma/rdma_vt.h>
void rvt_driver_mcast_init(struct rvt_dev_info *rdi);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index f5d0e33cf3d7..4d2238f3f3c8 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
index 02466c40bc1e..7e92cf28e071 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.h
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVTMMAP_H
-#define DEF_RDMAVTMMAP_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVTMMAP_H
+#define DEF_RDMAVTMMAP_H
+
#include <rdma/rdma_vt.h>
void rvt_mmap_init(struct rvt_dev_info *rdi);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 34b7af6ab9c2..8a1f2e285180 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
@@ -410,7 +368,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.page_shift = PAGE_SHIFT;
m = 0;
n = 0;
- for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
void *vaddr;
vaddr = page_address(sg_page_iter_page(&sg_iter));
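rvt_reg_user_mr() now iterates the umem through its sg_table rather than the raw sgl/nmap pair; for_each_sgtable_page() takes the table directly and hides the nents bookkeeping. The walk looks like this (the third argument is the starting page offset):

	struct sg_page_iter sg_iter;

	/* Visit each CPU page backing the user memory region. */
	for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
		void *vaddr = page_address(sg_page_iter_page(&sg_iter));
		/* ... record vaddr in the MR page tables ... */
	}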
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index b3aba359401b..d17f1400b5f6 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTMR_H
-#define DEF_RVTMR_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTMR_H
+#define DEF_RVTMR_H
+
#include <rdma/rdma_vt.h>
struct rvt_mr {
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index 01b7abf91520..ae62071969fa 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index 06a6a38beedc..42a0ef3b7da3 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVTPD_H
-#define DEF_RDMAVTPD_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVTPD_H
+#define DEF_RDMAVTPD_H
+
#include <rdma/rdma_vt.h>
int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e9f3d356b361..49bdd78ac664 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/hash.h>
@@ -1058,7 +1016,7 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
/**
* rvt_create_qp - create a queue pair for a device
- * @ibpd: the protection domain who's device we create the queue pair for
+ * @ibqp: the queue pair
* @init_attr: the attributes of the queue pair
* @udata: user data for libibverbs.so
*
@@ -1066,47 +1024,45 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
* unique idea of what queue pair numbers mean. For instance there is a reserved
* range for PSM.
*
- * Return: the queue pair on success, otherwise returns an errno.
+ * Return: 0 on success, otherwise returns an errno.
*
* Called by the ib_create_qp() core verbs function.
*/
-struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct rvt_qp *qp;
- int err;
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ int ret = -ENOMEM;
struct rvt_swqe *swq = NULL;
size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret = ERR_PTR(-ENOMEM);
- struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
+ size_t sg_list_sz = 0;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
void *priv = NULL;
size_t sqsize;
u8 exclude_prefix = 0;
if (!rdi)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge >
rdi->dparms.props.max_recv_sge ||
init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (init_attr->cap.max_send_sge +
init_attr->cap.max_send_wr +
init_attr->cap.max_recv_sge +
init_attr->cap.max_recv_wr == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
sqsize =
init_attr->cap.max_send_wr + 1 +
@@ -1115,8 +1071,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
case IB_QPT_SMI:
case IB_QPT_GSI:
if (init_attr->port_num == 0 ||
- init_attr->port_num > ibpd->device->phys_port_cnt)
- return ERR_PTR(-EINVAL);
+ init_attr->port_num > ibqp->device->phys_port_cnt)
+ return -EINVAL;
fallthrough;
case IB_QPT_UC:
case IB_QPT_RC:
@@ -1124,10 +1080,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
if (!swq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- sz = sizeof(*qp);
- sg_list_sz = 0;
if (init_attr->srq) {
struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
@@ -1137,10 +1091,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
- qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
- rdi->dparms.node);
- if (!qp)
- goto bail_swq;
+ qp->r_sg_list =
+ kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
+ if (!qp->r_sg_list)
+ goto bail_qp;
qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
RCU_INIT_POINTER(qp->next, NULL);
@@ -1165,7 +1119,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
*/
priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) {
- ret = priv;
+ ret = PTR_ERR(priv);
goto bail_qp;
}
qp->priv = priv;
@@ -1179,12 +1133,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct rvt_rwqe);
- err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
+ ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
rdi->dparms.node, udata);
- if (err) {
- ret = ERR_PTR(err);
+ if (ret)
goto bail_driver_priv;
- }
}
/*
@@ -1205,40 +1157,35 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = RVT_S_SIGNAL_REQ_WR;
- err = alloc_ud_wq_attr(qp, rdi->dparms.node);
- if (err) {
- ret = (ERR_PTR(err));
+ ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
+ if (ret)
goto bail_rq_rvt;
- }
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
exclude_prefix = RVT_AIP_QP_PREFIX;
- err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
+ ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
init_attr->port_num,
exclude_prefix);
- if (err < 0) {
- ret = ERR_PTR(err);
+ if (ret < 0)
goto bail_rq_wq;
- }
- qp->ibqp.qp_num = err;
+
+ qp->ibqp.qp_num = ret;
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
- err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
- if (err) {
- ret = ERR_PTR(err);
+ ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
+ if (ret)
goto bail_rq_wq;
- }
}
break;
default:
/* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
init_attr->cap.max_inline_data = 0;
@@ -1251,28 +1198,24 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!qp->r_rq.wq) {
__u64 offset = 0;
- err = ib_copy_to_udata(udata, &offset,
+ ret = ib_copy_to_udata(udata, &offset,
sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
+ if (ret)
goto bail_qpn;
- }
} else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq);
if (IS_ERR(qp->ip)) {
- ret = ERR_CAST(qp->ip);
+ ret = PTR_ERR(qp->ip);
goto bail_qpn;
}
- err = ib_copy_to_udata(udata, &qp->ip->offset,
+ ret = ib_copy_to_udata(udata, &qp->ip->offset,
sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
+ if (ret)
goto bail_ip;
- }
}
qp->pid = current->pid;
}
@@ -1280,7 +1223,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail_ip;
}
@@ -1306,9 +1249,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_unlock_irq(&rdi->pending_lock);
}
- ret = &qp->ibqp;
-
- return ret;
+ return 0;
bail_ip:
if (qp->ip)
@@ -1328,11 +1269,8 @@ bail_driver_priv:
bail_qp:
kfree(qp->s_ack_queue);
- kfree(qp);
-
-bail_swq:
+ kfree(qp->r_sg_list);
vfree(swq);
-
return ret;
}
@@ -1762,11 +1700,11 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
kvfree(qp->r_rq.kwq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
+ kfree(qp->r_sg_list);
rdma_destroy_ah_attr(&qp->remote_ah_attr);
rdma_destroy_ah_attr(&qp->alt_ah_attr);
free_ud_wq_attr(qp);
vfree(qp->s_wq);
- kfree(qp);
return 0;
}
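rdmavt previously folded the receive SG list into a single kzalloc of sz + sg_list_sz trailing the QP itself; with the QP now core-allocated, r_sg_list gets its own allocation and a matching kfree() in rvt_destroy_qp(). The paired pattern, as in the hunks above:

	/* create: the SG list no longer rides on the qp allocation */
	qp->r_sg_list = kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
	if (!qp->r_sg_list)
		goto bail_qp;

	/* destroy: freed explicitly, since kfree(qp) moved into ib_core */
	kfree(qp->r_sg_list);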
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 2cdba1283bf6..bd04be80723c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -1,60 +1,17 @@
-#ifndef DEF_RVTQP_H
-#define DEF_RVTQP_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTQP_H
+#define DEF_RVTQP_H
+
#include <rdma/rdmavt_qp.h>
int rvt_driver_qp_init(struct rvt_dev_info *rdi);
void rvt_qp_exit(struct rvt_dev_info *rdi);
-struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
+int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index c58735f4c94a..4e5d4a27633c 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <rdma/rdmavt_qp.h>
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 2a7c2f12d372..14d196bde2a1 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/err.h>
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
index d5a1a053b1b9..7d17372cd269 100644
--- a/drivers/infiniband/sw/rdmavt/srq.h
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RVTSRQ_H
-#define DEF_RVTSRQ_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RVTSRQ_H
+#define DEF_RVTSRQ_H
+
#include <rdma/rdma_vt.h>
void rvt_driver_srq_init(struct rvt_dev_info *rdi);
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
diff --git a/drivers/infiniband/sw/rdmavt/trace.c b/drivers/infiniband/sw/rdmavt/trace.c
index d593285a349c..01704b8dd683 100644
--- a/drivers/infiniband/sw/rdmavt/trace.c
+++ b/drivers/infiniband/sw/rdmavt/trace.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index 36ddbd291ee0..30eb4a72ea7d 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016, 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index 91bc192cee5e..30dd1d9bae26 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_CQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_CQ_H
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
index c5b675ca4fa0..1de7012000cb 100644
--- a/drivers/infiniband/sw/rdmavt/trace_mr.h
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_MR_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_MR_H
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index 800cec8bb3c7..c28c81fcb32a 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_QP_H
diff --git a/drivers/infiniband/sw/rdmavt/trace_rc.h b/drivers/infiniband/sw/rdmavt/trace_rc.h
index 9de52e138025..833bf778b05d 100644
--- a/drivers/infiniband/sw/rdmavt/trace_rc.h
+++ b/drivers/infiniband/sw/rdmavt/trace_rc.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_RC_H
diff --git a/drivers/infiniband/sw/rdmavt/trace_rvt.h b/drivers/infiniband/sw/rdmavt/trace_rvt.h
index 746f33461d9a..9df6b0b8263b 100644
--- a/drivers/infiniband/sw/rdmavt/trace_rvt.h
+++ b/drivers/infiniband/sw/rdmavt/trace_rvt.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_RVT_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_RVT_H
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index cb96be0f8f19..ff7d39a30768 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_TX_H
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index ac17209816cd..59481ae39505 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -1,48 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
@@ -131,6 +89,13 @@ static int rvt_query_device(struct ib_device *ibdev,
return 0;
}
+static int rvt_get_numa_node(struct ib_device *ibdev)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+
+ return rdi->dparms.node;
+}
+
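/*
 * A sketch of how the core side can use this new hook to place the QP
 * container near the device's memory; the exact core call site is an
 * assumption here, not part of this patch:
 *
 *	int node = dev->ops.get_numa_node ?
 *		   dev->ops.get_numa_node(dev) : NUMA_NO_NODE;
 *	qp = kzalloc_node(dev->ops.size_ib_qp, GFP_KERNEL, node);
 */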
static int rvt_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
@@ -380,6 +345,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.destroy_srq = rvt_destroy_srq,
.detach_mcast = rvt_detach_mcast,
.get_dma_mr = rvt_get_dma_mr,
+ .get_numa_node = rvt_get_numa_node,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
.mmap = rvt_mmap,
@@ -406,6 +372,7 @@ static const struct ib_device_ops rvt_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
};
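/*
 * INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp) registers sizeof(struct rvt_qp)
 * with the core and requires the ibqp member to sit at offset 0, so the
 * core can allocate the whole driver structure and treat its start as the
 * ib_qp. A simplified sketch of the allocation this enables (assumed core
 * behaviour, not shown in this patch):
 */
static struct ib_qp *core_alloc_qp_sketch(struct ib_device *dev)
{
	/* dev->ops.size_ib_qp was filled in by INIT_RDMA_OBJ_SIZE() */
	return kzalloc(dev->ops.size_ib_qp, GFP_KERNEL);
}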
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index c0fed6510f0b..461574e3f6a5 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVT_H
-#define DEF_RDMAVT_H
-
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_H
+#define DEF_RDMAVT_H
+
#include <rdma/rdma_vt.h>
#include <linux/pci.h>
#include "pd.h"
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 623fd17df02d..1bb3fb618bf5 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/crc32.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -42,27 +41,6 @@
extern bool rxe_initialized;
-static inline u32 rxe_crc32(struct rxe_dev *rxe,
- u32 crc, void *next, size_t len)
-{
- u32 retval;
- int err;
-
- SHASH_DESC_ON_STACK(shash, rxe->tfm);
-
- shash->tfm = rxe->tfm;
- *(u32 *)shash_desc_ctx(shash) = crc;
- err = crypto_shash_update(shash, next, len);
- if (unlikely(err)) {
- pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
- return crc32_le(crc, next, len);
- }
-
- retval = *(u32 *)shash_desc_ctx(shash);
- barrier_data(shash_desc_ctx(shash));
- return retval;
-}
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 58ad9c2644f3..d2d802c776fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -349,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
- payload_size(pkt), RXE_TO_MR_OBJ, NULL);
+ payload_size(pkt), RXE_TO_MR_OBJ);
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
@@ -371,7 +371,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
- sizeof(u64), RXE_TO_MR_OBJ, NULL);
+ sizeof(u64), RXE_TO_MR_OBJ);
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index 66b2aad54bb7..e03af3012590 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -4,18 +4,79 @@
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
+#include <linux/crc32.h>
+
#include "rxe.h"
#include "rxe_loc.h"
-/* Compute a partial ICRC for all the IB transport headers. */
-u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+/**
+ * rxe_icrc_init() - Initialize crypto function for computing crc32
+ * @rxe: rdma_rxe device object
+ *
+ * Return: 0 on success, else a negative errno
+ */
+int rxe_icrc_init(struct rxe_dev *rxe)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_warn("failed to init crc32 algorithm err:%ld\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ rxe->tfm = tfm;
+
+ return 0;
+}
+
+/**
+ * rxe_crc32() - Compute cumulative crc32 for a contiguous segment
+ * @rxe: rdma_rxe device object
+ * @crc: starting crc32 value from previous segments
+ * @next: starting address of current segment
+ * @len: length of current segment
+ *
+ * Return: the cumulative crc32 checksum
+ */
+static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
+{
+ __be32 icrc;
+ int err;
+
+ SHASH_DESC_ON_STACK(shash, rxe->tfm);
+
+ shash->tfm = rxe->tfm;
+ *(__be32 *)shash_desc_ctx(shash) = crc;
+ err = crypto_shash_update(shash, next, len);
+ if (unlikely(err)) {
+ pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
+ return (__force __be32)crc32_le((__force u32)crc, next, len);
+ }
+
+ icrc = *(__be32 *)shash_desc_ctx(shash);
+ barrier_data(shash_desc_ctx(shash));
+
+ return icrc;
+}
+
+/**
+ * rxe_icrc_hdr() - Compute the partial ICRC for the network and transport
+ * headers of a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: the partial ICRC
+ */
+static __be32 rxe_icrc_hdr(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
unsigned int bth_offset = 0;
struct iphdr *ip4h = NULL;
struct ipv6hdr *ip6h = NULL;
struct udphdr *udph;
struct rxe_bth *bth;
- int crc;
+ __be32 crc;
int length;
int hdr_size = sizeof(struct udphdr) +
(skb->protocol == htons(ETH_P_IP) ?
@@ -30,7 +91,7 @@ u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
/* This seed is the result of computing a CRC with a seed of
* 0xffffffff and 8 bytes of 0xff representing a masked LRH.
*/
- crc = 0xdebb20e3;
+ crc = (__force __be32)0xdebb20e3;
if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
memcpy(pshdr, ip_hdr(skb), hdr_size);
@@ -67,3 +128,58 @@ u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
return crc;
}
+
+/**
+ * rxe_icrc_check() - Compute ICRC for a packet and compare to the ICRC
+ * delivered in the packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: 0 if the values match, else a negative errno
+ */
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ __be32 *icrcp;
+ __be32 pkt_icrc;
+ __be32 icrc;
+
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ pkt_icrc = *icrcp;
+
+ icrc = rxe_icrc_hdr(skb, pkt);
+ icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt) + bth_pad(pkt));
+ icrc = ~icrc;
+
+ if (unlikely(icrc != pkt_icrc)) {
+ if (skb->protocol == htons(ETH_P_IPV6))
+ pr_warn_ratelimited("bad ICRC from %pI6c\n",
+ &ipv6_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IP))
+ pr_warn_ratelimited("bad ICRC from %pI4\n",
+ &ip_hdr(skb)->saddr);
+ else
+ pr_warn_ratelimited("bad ICRC from unknown\n");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rxe_icrc_generate() - Compute ICRC for a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ */
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+ __be32 *icrcp;
+ __be32 icrc;
+
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ icrc = rxe_icrc_hdr(skb, pkt);
+ icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt) + bth_pad(pkt));
+ *icrcp = ~icrc;
+}
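/*
 * How the pieces above compose: both the check and the generate paths fold
 * the header ICRC into the payload plus pad, then invert. An equivalent
 * one-shot sketch that only restates the two functions above (not
 * additional patch code):
 */
static __be32 rxe_full_icrc_sketch(struct sk_buff *skb,
				   struct rxe_pkt_info *pkt)
{
	__be32 icrc = rxe_icrc_hdr(skb, pkt);

	icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
			 payload_size(pkt) + bth_pad(pkt));
	return ~icrc;	/* stored in the last RXE_ICRC_SIZE bytes */
}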
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ddb20855dee..f0c954575bde 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -77,10 +77,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum rxe_mr_copy_dir dir, u32 *crcp);
-int copy_data(struct rxe_pd *pd, int access,
- struct rxe_dma_info *dma, void *addr, int length,
- enum rxe_mr_copy_dir dir, u32 *crcp);
+ enum rxe_mr_copy_dir dir);
+int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
+ void *addr, int length, enum rxe_mr_copy_dir dir);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
@@ -99,11 +98,11 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
void rxe_mw_cleanup(struct rxe_pool_entry *arg);
/* rxe_net.c */
-void rxe_loopback(struct sk_buff *skb);
-int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
@@ -193,7 +192,10 @@ int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
-u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+/* rxe_icrc.c */
+int rxe_icrc_init(struct rxe_dev *rxe);
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
@@ -204,47 +206,4 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}
-static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- struct sk_buff *skb)
-{
- int err;
- int is_request = pkt->mask & RXE_REQ_MASK;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-
- if ((is_request && (qp->req.state != QP_STATE_READY)) ||
- (!is_request && (qp->resp.state != QP_STATE_READY))) {
- pr_info("Packet dropped. QP is not in ready state\n");
- goto drop;
- }
-
- if (pkt->mask & RXE_LOOPBACK_MASK) {
- memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
- rxe_loopback(skb);
- err = 0;
- } else {
- err = rxe_send(pkt, skb);
- }
-
- if (err) {
- rxe->xmit_errors++;
- rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
- return err;
- }
-
- if ((qp_type(qp) != IB_QPT_RC) &&
- (pkt->mask & RXE_END_MASK)) {
- pkt->wqe->state = wqe_state_done;
- rxe_run_task(&qp->comp.task, 1);
- }
-
- rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
- goto done;
-
-drop:
- kfree_skb(skb);
- err = 0;
-done:
- return err;
-}
-
#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index be4bcb420fab..5890a8246216 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -123,7 +123,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
goto err_out;
}
- mr->umem = umem;
num_buf = ib_umem_num_pages(umem);
rxe_mr_init(access, mr);
@@ -143,7 +142,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
if (length > 0) {
buf = map[0]->buf;
- for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ for_each_sgtable_page(&umem->sgt_append.sgt, &sg_iter, 0) {
if (num_buf >= RXE_BUF_PER_MAP) {
map++;
buf = map[0]->buf;
@@ -286,11 +285,10 @@ out:
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mr object starting at iova. Compute incremental value of
- * crc32 if crcp is not zero. caller must hold a reference to mr
+ * a mr object starting at iova.
*/
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum rxe_mr_copy_dir dir, u32 *crcp)
+ enum rxe_mr_copy_dir dir)
{
int err;
int bytes;
@@ -300,7 +298,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
int m;
int i;
size_t offset;
- u32 crc = crcp ? (*crcp) : 0;
if (length == 0)
return 0;
@@ -314,10 +311,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
memcpy(dest, src, length);
- if (crcp)
- *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
- length);
-
return 0;
}
@@ -348,10 +341,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
memcpy(dest, src, bytes);
- if (crcp)
- crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
- bytes);
-
length -= bytes;
addr += bytes;
@@ -366,9 +355,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
}
}
- if (crcp)
- *crcp = crc;
-
return 0;
err1:
@@ -384,8 +370,7 @@ int copy_data(
struct rxe_dma_info *dma,
void *addr,
int length,
- enum rxe_mr_copy_dir dir,
- u32 *crcp)
+ enum rxe_mr_copy_dir dir)
{
int bytes;
struct rxe_sge *sge = &dma->sge[dma->cur_sge];
@@ -446,7 +431,7 @@ int copy_data(
if (bytes > 0) {
iova = sge->addr + offset;
- err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
+ err = rxe_mr_copy(mr, iova, addr, bytes, dir);
if (err)
goto err2;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 5ac27f28ace1..2cb810cb890a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -344,7 +344,7 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
int err = 0;
@@ -353,8 +353,6 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
else if (skb->protocol == htons(ETH_P_IPV6))
err = prepare6(pkt, skb);
- *crc = rxe_icrc_hdr(pkt, skb);
-
if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
pkt->mask |= RXE_LOOPBACK_MASK;
@@ -374,7 +372,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
rxe_drop_ref(qp);
}
-int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
int err;
@@ -407,19 +405,64 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
/* fix up a send packet to match the packets
* received from UDP before looping them back
*/
-void rxe_loopback(struct sk_buff *skb)
+static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
if (skb->protocol == htons(ETH_P_IP))
skb_pull(skb, sizeof(struct iphdr));
else
skb_pull(skb, sizeof(struct ipv6hdr));
- if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+ if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
kfree_skb(skb);
+ return -EIO;
+ }
+
+ rxe_rcv(skb);
+
+ return 0;
+}
+
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ int err;
+ int is_request = pkt->mask & RXE_REQ_MASK;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ if ((is_request && (qp->req.state != QP_STATE_READY)) ||
+ (!is_request && (qp->resp.state != QP_STATE_READY))) {
+ pr_info("Packet dropped. QP is not in ready state\n");
+ goto drop;
+ }
+
+ rxe_icrc_generate(skb, pkt);
+
+ if (pkt->mask & RXE_LOOPBACK_MASK)
+ err = rxe_loopback(skb, pkt);
else
- rxe_rcv(skb);
+ err = rxe_send(skb, pkt);
+ if (err) {
+ rxe->xmit_errors++;
+ rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
+ return err;
+ }
+
+ if ((qp_type(qp) != IB_QPT_RC) &&
+ (pkt->mask & RXE_END_MASK)) {
+ pkt->wqe->state = wqe_state_done;
+ rxe_run_task(&qp->comp.task, 1);
+ }
+
+ rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+ goto done;
+
+drop:
+ kfree_skb(skb);
+ err = 0;
+done:
+ return err;
}
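/*
 * With the move above, rxe_xmit_packet() is the single choke point where
 * every outgoing packet, wire or loopback, picks up its ICRC via
 * rxe_icrc_generate() before rxe_send()/rxe_loopback(). A caller sketch:
 */
static int example_reply(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb)
{
	/* ICRC is generated inside; callers no longer touch the trailer */
	return rxe_xmit_packet(qp, pkt, skb);
}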
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 0b8e7c6255a2..ffa8420b4765 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -41,7 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.size = sizeof(struct rxe_qp),
.elem_offset = offsetof(struct rxe_qp, pelem),
.cleanup = rxe_qp_cleanup,
- .flags = RXE_POOL_INDEX,
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX,
},
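/*
 * RXE_POOL_NO_ALLOC flips the qp pool from owning its entries to merely
 * tracking externally allocated ones. Sketch of the two call patterns,
 * assuming the rxe_pool API used elsewhere in this series:
 *
 *	qp = rxe_alloc(&rxe->qp_pool);		// pool-owned: kzalloc + add
 *
 *	err = rxe_add_to_pool(&rxe->qp_pool, qp); // caller-owned: add only,
 *						  // memory came from the core
 */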
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 7a49e27da23a..6a6cc1fa90e4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -361,8 +361,6 @@ void rxe_rcv(struct sk_buff *skb)
int err;
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
struct rxe_dev *rxe = pkt->rxe;
- __be32 *icrcp;
- u32 calc_icrc, pack_icrc;
if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop;
@@ -384,26 +382,9 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(err))
goto drop;
- /* Verify ICRC */
- icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
- pack_icrc = be32_to_cpu(*icrcp);
-
- calc_icrc = rxe_icrc_hdr(pkt, skb);
- calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
- payload_size(pkt) + bth_pad(pkt));
- calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
- if (unlikely(calc_icrc != pack_icrc)) {
- if (skb->protocol == htons(ETH_P_IPV6))
- pr_warn_ratelimited("bad ICRC from %pI6c\n",
- &ipv6_hdr(skb)->saddr);
- else if (skb->protocol == htons(ETH_P_IP))
- pr_warn_ratelimited("bad ICRC from %pI4\n",
- &ip_hdr(skb)->saddr);
- else
- pr_warn_ratelimited("bad ICRC from unknown\n");
-
+ err = rxe_icrc_check(skb, pkt);
+ if (unlikely(err))
goto drop;
- }
rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index c57699cc6578..3894197a82f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -466,12 +466,9 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_pkt_info *pkt, struct sk_buff *skb,
int paylen)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- u32 crc = 0;
- u32 *p;
int err;
- err = rxe_prepare(pkt, skb, &crc);
+ err = rxe_prepare(pkt, skb);
if (err)
return err;
@@ -479,7 +476,6 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (wqe->wr.send_flags & IB_SEND_INLINE) {
u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
- crc = rxe_crc32(rxe, crc, tmp, paylen);
memcpy(payload_addr(pkt), tmp, paylen);
wqe->dma.resid -= paylen;
@@ -487,8 +483,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else {
err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
- RXE_FROM_MR_OBJ,
- &crc);
+ RXE_FROM_MR_OBJ);
if (err)
return err;
}
@@ -496,12 +491,8 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u8 *pad = payload_addr(pkt) + paylen;
memset(pad, 0, bth_pad(pkt));
- crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
}
}
- p = payload_addr(pkt) + paylen + bth_pad(pkt);
-
- *p = ~crc;
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 360ec67cb9e1..5501227ddc65 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -536,7 +536,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
- data_addr, data_len, RXE_TO_MR_OBJ, NULL);
+ data_addr, data_len, RXE_TO_MR_OBJ);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE;
@@ -552,7 +552,7 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int data_len = payload_size(pkt);
err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
- payload_addr(pkt), data_len, RXE_TO_MR_OBJ, NULL);
+ payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -613,13 +613,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
int opcode,
int payload,
u32 psn,
- u8 syndrome,
- u32 *crcp)
+ u8 syndrome)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct sk_buff *skb;
- u32 crc = 0;
- u32 *p;
int paylen;
int pad;
int err;
@@ -651,20 +648,12 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe_prepare(ack, skb, &crc);
+ err = rxe_prepare(ack, skb);
if (err) {
kfree_skb(skb);
return NULL;
}
- if (crcp) {
- /* CRC computation will be continued by the caller */
- *crcp = crc;
- } else {
- p = payload_addr(ack) + payload + bth_pad(ack);
- *p = ~crc;
- }
-
return skb;
}
@@ -682,8 +671,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
int opcode;
int err;
struct resp_res *res = qp->resp.res;
- u32 icrc;
- u32 *p;
if (!res) {
/* This is the first time we process that request. Get a
@@ -742,24 +729,20 @@ static enum resp_states read_reply(struct rxe_qp *qp,
payload = min_t(int, res->read.resid, mtu);
skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
- res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
+ res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb)
return RESPST_ERR_RNR;
err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
- payload, RXE_FROM_MR_OBJ, &icrc);
+ payload, RXE_FROM_MR_OBJ);
if (err)
pr_err("Failed copying memory\n");
if (bth_pad(&ack_pkt)) {
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u8 *pad = payload_addr(&ack_pkt) + payload;
memset(pad, 0, bth_pad(&ack_pkt));
- icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
}
- p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
- *p = ~icrc;
err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err) {
@@ -984,7 +967,7 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
struct sk_buff *skb;
skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
- 0, psn, syndrome, NULL);
+ 0, psn, syndrome);
if (!skb) {
err = -ENOMEM;
goto err1;
@@ -1008,7 +991,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
skb = prepare_ack_packet(qp, pkt, &ack_pkt,
IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
- syndrome, NULL);
+ syndrome);
if (!skb) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c223959ac174..267b5a9c345d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -391,59 +391,52 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
return err;
}
-static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init,
- struct ib_udata *udata)
+static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_qp *qp;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_pd *pd = to_rpd(ibqp->pd);
+ struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_create_qp_resp __user *uresp = NULL;
if (udata) {
if (udata->outlen < sizeof(*uresp))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
uresp = udata->outbuf;
}
if (init->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
err = rxe_qp_chk_init(rxe, init);
if (err)
- goto err1;
-
- qp = rxe_alloc(&rxe->qp_pool);
- if (!qp) {
- err = -ENOMEM;
- goto err1;
- }
+ return err;
if (udata) {
- if (udata->inlen) {
- err = -EINVAL;
- goto err2;
- }
+ if (udata->inlen)
+ return -EINVAL;
+
qp->is_user = true;
} else {
qp->is_user = false;
}
- rxe_add_index(qp);
+ err = rxe_add_to_pool(&rxe->qp_pool, qp);
+ if (err)
+ return err;
- err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
+ rxe_add_index(qp);
+ err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err)
- goto err3;
+ goto qp_init;
- return &qp->ibqp;
+ return 0;
-err3:
+qp_init:
rxe_drop_index(qp);
-err2:
rxe_drop_ref(qp);
-err1:
- return ERR_PTR(err);
+ return err;
}
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1145,6 +1138,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
@@ -1154,7 +1148,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
- struct crypto_shash *tfm;
strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1173,13 +1166,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
if (err)
return err;
- tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(tfm)) {
- pr_err("failed to allocate crc algorithm err:%ld\n",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- rxe->tfm = tfm;
+ err = rxe_icrc_init(rxe);
+ if (err)
+ return err;
err = ib_register_device(dev, ibdev_name, NULL);
if (err)
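
rxe_create_qp() above is converted from the allocate-and-return-a-pointer style to the core-allocated style. A sketch of the new contract, not the exact core code: the RDMA core allocates the driver object (sized via the INIT_RDMA_OBJ_SIZE entry added above), and the driver only validates and initializes it, reporting plain errnos:

static int create_qp_new_style(struct ib_qp *ibqp,
			       struct ib_qp_init_attr *init,
			       struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);	/* storage already allocated */

	if (init->create_flags)
		return -EOPNOTSUPP;		/* errno, not ERR_PTR() */

	/* ... driver-specific initialization of qp ... */
	return 0;
}

On failure the core disposes of the object, which is why the error path above only unwinds the driver's own pool state.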
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 959a3260fcab..ac2a2148027f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -210,8 +210,8 @@ struct rxe_resp_info {
};
struct rxe_qp {
- struct rxe_pool_entry pelem;
struct ib_qp ibqp;
+ struct rxe_pool_entry pelem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
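
The single-line reorder above is load-bearing: with core-allocated QPs, the RDMA core expects the ib_qp to sit at offset zero of the driver structure (INIT_RDMA_OBJ_SIZE builds on that), so conversions become a plain zero-offset container_of(). A sketch of the conversion this enables, under that assumption:

static inline struct rxe_qp *to_rqp_sketch(struct ib_qp *ibqp)
{
	/* zero offset now that ibqp is the first member */
	return container_of(ibqp, struct rxe_qp, ibqp);
}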
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index cf55326f2ab4..9093e6a80b26 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -297,6 +297,7 @@ static const struct ib_device_ops siw_device_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp),
INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext),
};
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index ddb2e66f9f13..7e01f2438afc 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1344,6 +1344,4 @@ void siw_free_qp(struct kref *ref)
siw_put_tx_cpu(qp->tx_cpu);
atomic_dec(&sdev->num_qp);
- siw_dbg_qp(qp, "free QP\n");
- kfree_rcu(qp, rcu);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 7989c4043db4..1f4e60257700 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -76,7 +76,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
if (unlikely(!p))
return -EFAULT;
- buffer = kmap(p);
+ buffer = kmap_local_page(p);
if (likely(PAGE_SIZE - off >= bytes)) {
memcpy(paddr, buffer + off, bytes);
@@ -84,7 +84,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
unsigned long part = bytes - (PAGE_SIZE - off);
memcpy(paddr, buffer + off, part);
- kunmap(p);
+ kunmap_local(buffer);
if (!mem->is_pbl)
p = siw_get_upage(mem->umem,
@@ -96,10 +96,10 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
if (unlikely(!p))
return -EFAULT;
- buffer = kmap(p);
+ buffer = kmap_local_page(p);
memcpy(paddr + part, buffer, bytes - part);
}
- kunmap(p);
+ kunmap_local(buffer);
}
}
return (int)bytes;
@@ -396,13 +396,20 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
#define MAX_TRAILER (MPA_CRC_SIZE + 4)
-static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
+static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len)
{
- while (kmap_mask) {
- if (kmap_mask & BIT(0))
- kunmap(*pp);
- pp++;
- kmap_mask >>= 1;
+ int i;
+
+ /*
+ * Work backwards through the array to honor the kmap_local_page()
+ * ordering requirements.
+ */
+ for (i = (len-1); i >= 0; i--) {
+ if (kmap_mask & BIT(i)) {
+ unsigned long addr = (unsigned long)iov[i].iov_base;
+
+ kunmap_local((void *)(addr & PAGE_MASK));
+ }
}
}
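
The rewritten siw_unmap_pages() walks the iovec backwards because kmap_local_page() mappings are per-CPU and stack-like: they must be released in the reverse order they were taken. A self-contained illustration of that rule, with hypothetical names:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_two_pages(void *dst, struct page *a, struct page *b,
				size_t n)
{
	void *ka = kmap_local_page(a);
	void *kb = kmap_local_page(b);

	memcpy(dst, ka, n);
	memcpy(dst + n, kb, n);

	kunmap_local(kb);	/* most recent mapping goes first */
	kunmap_local(ka);
}

This also explains the PAGE_MASK rounding above: the stored iov_base carries an in-page offset, so the helper masks it off to recover the mapping address for kunmap_local().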
@@ -485,6 +492,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
while (sge_len) {
size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);
+ void *kaddr;
if (!is_kva) {
struct page *p;
@@ -497,7 +505,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
p = siw_get_upage(mem->umem,
sge->laddr + sge_off);
if (unlikely(!p)) {
- siw_unmap_pages(page_array, kmap_mask);
+ siw_unmap_pages(iov, kmap_mask, seg);
wqe->processed -= c_tx->bytes_unsent;
rv = -EFAULT;
goto done_crc;
@@ -505,11 +513,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
page_array[seg] = p;
if (!c_tx->use_sendpage) {
- iov[seg].iov_base = kmap(p) + fp_off;
- iov[seg].iov_len = plen;
+ void *kaddr = kmap_local_page(p);
/* Remember for later kunmap() */
kmap_mask |= BIT(seg);
+ iov[seg].iov_base = kaddr + fp_off;
+ iov[seg].iov_len = plen;
if (do_crc)
crypto_shash_update(
@@ -517,10 +526,11 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
iov[seg].iov_base,
plen);
} else if (do_crc) {
+ kaddr = kmap_local_page(p);
crypto_shash_update(c_tx->mpa_crc_hd,
- kmap(p) + fp_off,
+ kaddr + fp_off,
plen);
- kunmap(p);
+ kunmap_local(kaddr);
}
} else {
u64 va = sge->laddr + sge_off;
@@ -540,7 +550,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (++seg > (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
- siw_unmap_pages(page_array, kmap_mask);
+ siw_unmap_pages(iov, kmap_mask, seg-1);
wqe->processed -= c_tx->bytes_unsent;
rv = -EMSGSIZE;
goto done_crc;
@@ -591,7 +601,7 @@ sge_done:
} else {
rv = kernel_sendmsg(s, &msg, iov, seg + 1,
hdr_len + data_len + trl_len);
- siw_unmap_pages(page_array, kmap_mask);
+ siw_unmap_pages(iov, kmap_mask, seg);
}
if (rv < (int)hdr_len) {
/* Not even complete hdr pushed or negative rv */
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 3f175f220a22..1b36350601fa 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -285,16 +285,16 @@ siw_mmap_entry_insert(struct siw_ucontext *uctx,
*
* Create QP of requested size on given device.
*
- * @pd: Protection Domain
+ * @qp: Queue pair
* @attrs: Initial QP attributes.
* @udata: used to provide QP ID, SQ and RQ size back to user.
*/
-struct ib_qp *siw_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
{
- struct siw_qp *qp = NULL;
+ struct ib_pd *pd = ibqp->pd;
+ struct siw_qp *qp = to_siw_qp(ibqp);
struct ib_device *base_dev = pd->device;
struct siw_device *sdev = to_siw_dev(base_dev);
struct siw_ucontext *uctx =
@@ -307,17 +307,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
siw_dbg(base_dev, "create new QP\n");
if (attrs->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
- rv = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
rv = -EOPNOTSUPP;
- goto err_out;
+ goto err_atomic;
}
if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
(attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
@@ -325,13 +324,13 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
(attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
siw_dbg(base_dev, "QP size error\n");
rv = -EINVAL;
- goto err_out;
+ goto err_atomic;
}
if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
siw_dbg(base_dev, "max inline send: %d > %d\n",
attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
rv = -EINVAL;
- goto err_out;
+ goto err_atomic;
}
/*
* NOTE: we allow for zero element SQ and RQ WQE's SGL's
@@ -340,19 +339,15 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
siw_dbg(base_dev, "QP must have send or receive queue\n");
rv = -EINVAL;
- goto err_out;
+ goto err_atomic;
}
if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
- goto err_out;
- }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- rv = -ENOMEM;
- goto err_out;
+ goto err_atomic;
}
+
init_rwsem(&qp->state_lock);
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
@@ -360,7 +355,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = siw_qp_add(sdev, qp);
if (rv)
- goto err_out;
+ goto err_atomic;
num_sqe = attrs->cap.max_send_wr;
num_rqe = attrs->cap.max_recv_wr;
@@ -482,23 +477,20 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);
- return &qp->base_qp;
+ return 0;
err_out_xa:
xa_erase(&sdev->qp_xa, qp_id(qp));
-err_out:
- if (qp) {
- if (uctx) {
- rdma_user_mmap_entry_remove(qp->sq_entry);
- rdma_user_mmap_entry_remove(qp->rq_entry);
- }
- vfree(qp->sendq);
- vfree(qp->recvq);
- kfree(qp);
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
}
- atomic_dec(&sdev->num_qp);
+ vfree(qp->sendq);
+ vfree(qp->recvq);
- return ERR_PTR(rv);
+err_atomic:
+ atomic_dec(&sdev->num_qp);
+ return rv;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 67ac08886a70..09964234f8d3 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -50,9 +50,8 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
-struct ib_qp *siw_create_qp(struct ib_pd *base_pd,
- struct ib_qp_init_attr *attr,
- struct ib_udata *udata);
+int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index afec40da9b58..9776b755d848 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -159,7 +159,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b44cbb8e84eb..b566f7cb7797 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -949,7 +949,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_t sector_off = mr_status.sig_err.sig_err_offset;
sector_div(sector_off, sector_size + 8);
- *sector = scsi_get_lba(iser_task->sc) + sector_off;
+ *sector = scsi_get_sector(iser_task->sc) + sector_off;
iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 26bbe5d6dff5..5e780bdd763d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -20,7 +20,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
cpu = raw_smp_processor_id();
s = this_cpu_ptr(stats->pcpu_stats);
- if (unlikely(con->cpu != cpu)) {
+ if (con->cpu != cpu) {
s->cpu_migr.to++;
/* Careful here, override s pointer */
@@ -180,7 +180,7 @@ void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
len = req->usr_len + req->data_len;
rtrs_clt_update_rdma_stats(stats, len, dir);
- if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_inc(&stats->inflight);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index f2c40e50f25e..bc8824b4ee0d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -75,9 +75,9 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
*/
do {
bit = find_first_zero_bit(clt->permits_map, max_depth);
- if (unlikely(bit >= max_depth))
+ if (bit >= max_depth)
return NULL;
- } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+ } while (test_and_set_bit_lock(bit, clt->permits_map));
permit = get_permit(clt, bit);
WARN_ON(permit->mem_id != bit);
@@ -115,14 +115,14 @@ struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
DEFINE_WAIT(wait);
permit = __rtrs_get_permit(clt, con_type);
- if (likely(permit) || !can_wait)
+ if (permit || !can_wait)
return permit;
do {
prepare_to_wait(&clt->permits_wait, &wait,
TASK_UNINTERRUPTIBLE);
permit = __rtrs_get_permit(clt, con_type);
- if (likely(permit))
+ if (permit)
break;
io_schedule();
@@ -175,7 +175,7 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
{
int id = 0;
- if (likely(permit->con_type == RTRS_IO_CON))
+ if (permit->con_type == RTRS_IO_CON)
id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
return to_clt_con(sess->s.con[id]);
@@ -329,7 +329,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
@@ -349,13 +349,13 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
container_of(wc->wr_cqe, typeof(*req), inv_cqe);
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
req->need_inv = false;
- if (likely(req->need_inv_comp))
+ if (req->need_inv_comp)
complete(&req->inv_comp);
else
/* Complete request from INV callback */
@@ -390,7 +390,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
sess = to_clt_sess(con->c.sess);
if (req->sg_cnt) {
- if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+ if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
/*
* We are here to invalidate read requests
* ourselves. In normal scenario server should
@@ -405,7 +405,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
* should do that ourselves.
*/
- if (likely(can_wait)) {
+ if (can_wait) {
req->need_inv_comp = true;
} else {
/* This should be IO path, so always notify */
@@ -416,10 +416,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
- } else if (likely(can_wait)) {
+ } else if (can_wait) {
wait_for_completion(&req->inv_comp);
} else {
/*
@@ -438,7 +438,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
}
if (!refcount_dec_and_test(&req->ref))
return;
- if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
req->in_use = false;
@@ -463,7 +463,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
enum ib_send_flags flags;
struct ib_sge sge;
- if (unlikely(!req->sg_size)) {
+ if (!req->sg_size) {
rtrs_wrn(con->c.sess,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
@@ -478,7 +478,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
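
Both I/O send paths now draw their signaling decision from the shared connection counter and the session-wide signal_interval instead of the per-connection io_cnt and queue_depth. A hypothetical distillation of the pattern named in the comment above, posting mostly unsignaled sends with a periodic signaled one so the provider can retire the send queue:

static enum ib_send_flags send_flags_sketch(atomic_t *wr_cnt,
					    unsigned int signal_interval)
{
	/* every signal_interval-th WR requests a completion */
	return atomic_inc_return(wr_cnt) % signal_interval ?
	       0 : IB_SEND_SIGNALED;
}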
@@ -513,7 +513,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(con->c.sess, "post iu failed %d\n", err);
rtrs_rdma_error_recovery(con);
}
@@ -533,7 +533,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- if (unlikely(wc->byte_len < sizeof(*msg))) {
+ if (wc->byte_len < sizeof(*msg)) {
rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
wc->byte_len);
goto out;
@@ -541,7 +541,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
- if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+ if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
@@ -551,8 +551,8 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
goto out;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
- if (likely(imm_type == RTRS_IO_RSP_IMM ||
- imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
u32 msg_id;
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
@@ -605,7 +605,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
bool w_inval = false;
int err;
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
rtrs_err(sess->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
@@ -625,8 +625,8 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
return;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
- if (likely(imm_type == RTRS_IO_RSP_IMM ||
- imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
u32 msg_id;
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
@@ -657,7 +657,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
@@ -680,6 +680,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
+ * and heartbeats (hb).
*/
break;
@@ -702,7 +703,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
} else {
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
}
- if (unlikely(err))
+ if (err)
return err;
}
@@ -727,7 +728,7 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
q_size *= 2;
err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
return err;
}
@@ -788,7 +789,7 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
ppcpu_path = this_cpu_ptr(clt->pcpu_path);
path = rcu_dereference(*ppcpu_path);
- if (unlikely(!path))
+ if (!path)
path = list_first_or_null_rcu(&clt->paths_list,
typeof(*path), s.entry);
else
@@ -819,10 +820,10 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
int inflight;
list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
continue;
- if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
continue;
inflight = atomic_read(&sess->stats->inflight);
@@ -870,10 +871,10 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
ktime_t latency;
list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
continue;
- if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
continue;
latency = sess->s.hb_cur_latency;
@@ -963,6 +964,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
+ req->mp_policy = sess->clt->mp_policy;
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
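
Stashing the multipath policy in the request closes a race: clt->mp_policy can be changed at runtime, so reading it again at completion could decrement an inflight counter that was never incremented (or vice versa). A sketch of the paired accounting the snapshot guarantees, using the fields this patch introduces:

static void inflight_inc_sketch(struct rtrs_clt_io_req *req,
				struct rtrs_clt_stats *stats)
{
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

static void inflight_dec_sketch(struct rtrs_clt_io_req *req,
				struct rtrs_clt_stats *stats)
{
	/* same policy value as at submission, by construction */
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_dec(&stats->inflight);
}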
@@ -1043,7 +1045,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
@@ -1062,7 +1064,7 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
if (nr < 0)
return nr;
- if (unlikely(nr < req->sg_cnt))
+ if (nr < req->sg_cnt)
return -EINVAL;
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1086,7 +1088,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (unlikely(tsize > sess->chunk_size)) {
+ if (tsize > sess->chunk_size) {
rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
tsize, sess->chunk_size);
return -EMSGSIZE;
@@ -1094,7 +1096,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
if (req->sg_cnt) {
count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
- if (unlikely(!count)) {
+ if (!count) {
rtrs_wrn(s, "Write request failed, map failed\n");
return -EINVAL;
}
@@ -1148,12 +1150,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
- if (unlikely(ret)) {
+ if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
ret, kobject_name(&sess->kobj), sess->hca_name,
sess->hca_port);
- if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
if (req->sg_cnt)
ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
@@ -1179,7 +1181,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (unlikely(tsize > sess->chunk_size)) {
+ if (tsize > sess->chunk_size) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
tsize, sess->chunk_size);
@@ -1189,7 +1191,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
if (req->sg_cnt) {
count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
- if (unlikely(!count)) {
+ if (!count) {
rtrs_wrn(s,
"Read request failed, dma map failed\n");
return -EINVAL;
@@ -1254,12 +1256,12 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
req->data_len, imm, wr);
- if (unlikely(ret)) {
+ if (ret) {
rtrs_err_rl(s,
"Read request failed: error=%d path=%s [%s:%u]\n",
ret, kobject_name(&sess->kobj), sess->hca_name,
sess->hca_port);
- if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
req->need_inv = false;
if (req->sg_cnt)
@@ -1287,15 +1289,14 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
for (path_it_init(&it, clt);
(alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
it.i++) {
- if (unlikely(READ_ONCE(alive_sess->state) !=
- RTRS_CLT_CONNECTED))
+ if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED)
continue;
req = rtrs_clt_get_copy_req(alive_sess, fail_req);
if (req->dir == DMA_TO_DEVICE)
err = rtrs_clt_write_req(req);
else
err = rtrs_clt_read_req(req);
- if (unlikely(err)) {
+ if (err) {
req->in_use = false;
continue;
}
@@ -1330,7 +1331,7 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
complete_rdma_req(req, -ECONNABORTED, false, true);
err = rtrs_clt_failover_req(clt, req);
- if (unlikely(err))
+ if (err)
/* Failover failed, notify anyway */
req->conf(req->priv, err);
}
@@ -1601,7 +1602,8 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
con->c.cid = cid;
con->c.sess = &sess->s;
- atomic_set(&con->io_cnt, 0);
+ /* Align with the server side, which initializes wr_cnt to 1 */
+ atomic_set(&con->c.wr_cnt, 1);
mutex_init(&con->con_mutex);
sess->s.con[cid] = &con->c;
@@ -1678,6 +1680,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
sess->queue_depth * 3 + 1);
max_send_sge = 2;
}
+ atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
@@ -1841,13 +1844,14 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
}
if (!sess->rbufs) {
- kfree(sess->rbufs);
sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
GFP_KERNEL);
if (!sess->rbufs)
return -ENOMEM;
}
sess->queue_depth = queue_depth;
+ sess->s.signal_interval = min_not_zero(queue_depth,
+ (unsigned short) SERVICE_CON_QUEUE_DEPTH);
sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
sess->max_io_size = le32_to_cpu(msg->max_io_size);
sess->flags = le32_to_cpu(msg->flags);
@@ -1958,7 +1962,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_ESTABLISHED:
cm_err = rtrs_rdma_conn_established(con, ev);
- if (likely(!cm_err)) {
+ if (!cm_err) {
/*
* Report success and wake up. Here we abuse state_wq,
* i.e. wake up without state change, but we set cm_err.
@@ -2377,7 +2381,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(sess->clt, "Sess info request send failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
@@ -2394,7 +2398,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
- if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+ if (!sg_cnt || (sess->queue_depth % sg_cnt)) {
rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
sg_cnt);
return -EINVAL;
@@ -2404,9 +2408,8 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
*/
- if (unlikely((ilog2(sg_cnt - 1) + 1) +
- (ilog2(sess->chunk_size - 1) + 1) >
- MAX_IMM_PAYL_BITS)) {
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS) {
rtrs_err(sess->clt,
"RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
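
The reflowed condition above is a bit-budget check: the server encodes a buffer id and an offset into the chunk in the RDMA immediate, so the two bit widths together must not exceed MAX_IMM_PAYL_BITS. A hypothetical predicate equivalent to the check:

#include <linux/log2.h>

static bool imm_payload_fits(u32 sg_cnt, u32 chunk_size, u32 max_payl_bits)
{
	/* bits to address a buffer plus bits to address a byte in it */
	return (ilog2(sg_cnt - 1) + 1) + (ilog2(chunk_size - 1) + 1) <=
	       max_payl_bits;
}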
@@ -2424,7 +2427,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
total_len += len;
- if (unlikely(!len || (len % sess->chunk_size))) {
+ if (!len || (len % sess->chunk_size)) {
rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
len);
return -EINVAL;
@@ -2438,11 +2441,11 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
}
}
/* Sanity check */
- if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+ if (sgi != sg_cnt || i != sess->queue_depth) {
rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
return -EINVAL;
}
- if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+ if (total_len != sess->chunk_size * sess->queue_depth) {
rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
return -EINVAL;
}
@@ -2464,14 +2467,14 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
ib_wc_status_msg(wc->status));
goto out;
}
WARN_ON(wc->opcode != IB_WC_RECV);
- if (unlikely(wc->byte_len < sizeof(*msg))) {
+ if (wc->byte_len < sizeof(*msg)) {
rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
wc->byte_len);
goto out;
@@ -2479,24 +2482,24 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
- if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+ if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
rx_sz = sizeof(*msg);
rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
- if (unlikely(wc->byte_len < rx_sz)) {
+ if (wc->byte_len < rx_sz) {
rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
err = process_info_rsp(sess, msg);
- if (unlikely(err))
+ if (err)
goto out;
err = post_recv_sess(sess);
- if (unlikely(err))
+ if (err)
goto out;
state = RTRS_CLT_CONNECTED;
@@ -2523,13 +2526,13 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
rtrs_clt_info_req_done);
rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
- if (unlikely(!tx_iu || !rx_iu)) {
+ if (!tx_iu || !rx_iu) {
err = -ENOMEM;
goto out;
}
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
goto out;
}
@@ -2544,7 +2547,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
/* Send info request */
err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
goto out;
}
@@ -2555,7 +2558,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
sess->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(
RTRS_CONNECT_TIMEOUT_MS));
- if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) {
if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
err = -ECONNRESET;
else
@@ -2567,7 +2570,7 @@ out:
rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
if (rx_iu)
rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
- if (unlikely(err))
+ if (err)
/* If we've never taken async path because of malloc problems */
rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
@@ -2915,7 +2918,7 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
&old_state);
} while (!changed && old_state != RTRS_CLT_DEAD);
- if (likely(changed)) {
+ if (changed) {
rtrs_clt_remove_path_from_arr(sess);
rtrs_clt_destroy_sess_files(sess, sysfs_self);
kobject_put(&sess->kobj);
@@ -2987,10 +2990,10 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
rcu_read_lock();
for (path_it_init(&it, clt);
(sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
continue;
- if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+ if (usr_len + hdr_len > sess->max_hdr_size) {
rtrs_wrn_rl(sess->clt,
"%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
dir == READ ? "Read" : "Write",
@@ -3005,7 +3008,7 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
err = rtrs_clt_read_req(req);
else
err = rtrs_clt_write_req(req);
- if (unlikely(err)) {
+ if (err) {
req->in_use = false;
continue;
}
@@ -3078,6 +3081,18 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
if (IS_ERR(sess))
return PTR_ERR(sess);
+ mutex_lock(&clt->paths_mutex);
+ if (clt->paths_num == 0) {
+ /*
+ * When all the paths have been removed from a session,
+ * the addition of the first path is, to the storage
+ * server, like a brand-new session.
+ */
+ sess->for_new_clt = 1;
+ }
+
+ mutex_unlock(&clt->paths_mutex);
+
/*
* It is totally safe to add path in CONNECTING state: coming
* IO will never grab it. Also it is very important to add
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index e276a2dfcf7c..9dc819885ec7 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -74,7 +74,6 @@ struct rtrs_clt_con {
u32 queue_num;
unsigned int cpu;
struct mutex con_mutex;
- atomic_t io_cnt;
int cm_err;
};
@@ -102,6 +101,7 @@ struct rtrs_clt_io_req {
unsigned int usr_len;
void *priv;
bool in_use;
+ enum rtrs_mp_policy mp_policy;
struct rtrs_clt_con *con;
struct rtrs_sg_desc *desc;
struct ib_sge *sge;
@@ -230,10 +230,7 @@ int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
size_t len);
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
-int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
-int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
+ size_t len);
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
char *page, size_t len);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 36f184a3b676..d12ddfa50747 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -96,6 +96,8 @@ struct rtrs_con {
struct rdma_cm_id *cm_id;
unsigned int cid;
int nr_cqe;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
};
struct rtrs_sess {
@@ -108,6 +110,7 @@ struct rtrs_sess {
unsigned int con_num;
unsigned int irq_con_num;
unsigned int recon_cnt;
+ unsigned int signal_interval;
struct rtrs_ib_dev *dev;
int dev_ref;
struct ib_cqe *hb_cqe;
@@ -309,9 +312,6 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
struct ib_send_wr *tail);
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
-int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
- u32 imm_data, enum ib_send_flags flags,
- struct ib_send_wr *head);
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 3df290086169..716ef7b23558 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -183,7 +183,7 @@ static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "REG MR failed: %s\n",
ib_wc_status_msg(wc->status));
close_sess(sess);
@@ -201,7 +201,6 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
struct rtrs_srv_sess *sess = to_srv_sess(s);
dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
- struct rtrs_srv *srv = sess->srv;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
@@ -216,7 +215,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
- if (unlikely(sg_cnt != 1))
+ if (sg_cnt != 1)
return -EINVAL;
offset = 0;
@@ -229,7 +228,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
/* WR will fail with length error
* if this is 0
*/
- if (unlikely(plist->length == 0)) {
+ if (plist->length == 0) {
rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
return -EINVAL;
}
@@ -269,7 +268,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
* From time to time we have to post signaled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
+ flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
if (need_inval) {
@@ -322,7 +321,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
offset, DMA_BIDIRECTIONAL);
err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
- if (unlikely(err))
+ if (err)
rtrs_err(s,
"Posting RDMA-Write-Request to QP failed, err: %d\n",
err);
@@ -347,7 +346,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
- struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
@@ -363,7 +361,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
if (need_inval) {
- if (likely(sg_cnt)) {
+ if (sg_cnt) {
inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
@@ -396,7 +394,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
@@ -439,7 +437,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
err = ib_post_send(id->con->c.qp, wr, NULL);
- if (unlikely(err))
+ if (err)
rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
err);
@@ -496,7 +494,7 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
id->status = status;
- if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ if (sess->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Sending I/O response failed, session %s is disconnected, sess state %s\n",
kobject_name(&sess->kobj),
@@ -508,12 +506,11 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
}
- if (unlikely(atomic_sub_return(1,
- &con->sq_wr_avail) < 0)) {
+ if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
kobject_name(&sess->kobj),
con->c.cid);
- atomic_add(1, &con->sq_wr_avail);
+ atomic_add(1, &con->c.sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
spin_unlock(&con->rsp_wr_wait_lock);
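
With wr_cnt and sq_wr_avail hoisted into the common rtrs_con, the server's response path keeps its credit scheme: take one send-queue credit per post, and if none is left, undo the take and park the reply on a wait list until send completions return credits (see the IB_WC_SEND hunk below). A condensed sketch of the take side:

static bool sq_take_credit_sketch(atomic_t *sq_wr_avail)
{
	if (atomic_sub_return(1, sq_wr_avail) < 0) {
		atomic_add(1, sq_wr_avail);	/* queue full: undo */
		return false;			/* caller queues the work */
	}
	return true;
}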
@@ -525,7 +522,7 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
else
err = rdma_write_sg(id);
- if (unlikely(err)) {
+ if (err) {
rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
kobject_name(&sess->kobj));
close_sess(sess);
@@ -712,7 +709,7 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info response send failed: %s\n",
ib_wc_status_msg(wc->status));
close_sess(sess);
@@ -801,7 +798,7 @@ static int process_info_req(struct rtrs_srv_con *con,
size_t tx_sz;
err = post_recv_sess(sess);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "post_recv_sess(), err: %d\n", err);
return err;
}
@@ -814,14 +811,14 @@ static int process_info_req(struct rtrs_srv_con *con,
strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
- if (unlikely(!rwr))
+ if (!rwr)
return -ENOMEM;
tx_sz = sizeof(*rsp);
tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
- if (unlikely(!tx_iu)) {
+ if (!tx_iu) {
err = -ENOMEM;
goto rwr_free;
}
@@ -853,7 +850,7 @@ static int process_info_req(struct rtrs_srv_con *con,
}
err = rtrs_srv_create_sess_files(sess);
- if (unlikely(err))
+ if (err)
goto iu_free;
kobject_get(&sess->kobj);
get_device(&sess->srv->dev);
@@ -873,7 +870,7 @@ static int process_info_req(struct rtrs_srv_con *con,
/* Send info response */
err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
@@ -896,14 +893,14 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info request receive failed: %s\n",
ib_wc_status_msg(wc->status));
goto close;
}
WARN_ON(wc->opcode != IB_WC_RECV);
- if (unlikely(wc->byte_len < sizeof(*msg))) {
+ if (wc->byte_len < sizeof(*msg)) {
rtrs_err(s, "Sess info request is malformed: size %d\n",
wc->byte_len);
goto close;
@@ -911,13 +908,13 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
- if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
+ if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
rtrs_err(s, "Sess info request is malformed: type %d\n",
le16_to_cpu(msg->type));
goto close;
}
err = process_info_req(con, msg);
- if (unlikely(err))
+ if (err)
goto close;
out:
@@ -938,11 +935,11 @@ static int post_recv_info_req(struct rtrs_srv_con *con)
rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_srv_info_req_done);
- if (unlikely(!rx_iu))
+ if (!rx_iu)
return -ENOMEM;
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&con->c, rx_iu);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
return err;
@@ -957,7 +954,7 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
for (i = 0; i < q_size; i++) {
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
- if (unlikely(err))
+ if (err)
return err;
}
@@ -978,7 +975,7 @@ static int post_recv_sess(struct rtrs_srv_sess *sess)
q_size = srv->queue_depth;
err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "post_recv_io(), err: %d\n", err);
return err;
}
@@ -1001,13 +998,13 @@ static void process_read(struct rtrs_srv_con *con,
void *data;
int ret;
- if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ if (sess->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing read request failed, session is disconnected, sess state %s\n",
rtrs_srv_state_str(sess->state));
return;
}
- if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
+ if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
rtrs_err_rl(s,
"Processing read request failed, invalid message\n");
return;
@@ -1025,7 +1022,7 @@ static void process_read(struct rtrs_srv_con *con,
ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len,
data + data_len, usr_len);
- if (unlikely(ret)) {
+ if (ret) {
rtrs_err_rl(s,
"Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
buf_id, ret);
@@ -1059,7 +1056,7 @@ static void process_write(struct rtrs_srv_con *con,
void *data;
int ret;
- if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ if (sess->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing write request failed, session is disconnected, sess state %s\n",
rtrs_srv_state_str(sess->state));
@@ -1076,8 +1073,8 @@ static void process_write(struct rtrs_srv_con *con,
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len,
- data + data_len, usr_len);
- if (unlikely(ret)) {
+ data + data_len, usr_len);
+ if (ret) {
rtrs_err_rl(s,
"Processing write request failed, user module callback reports err: %d\n",
ret);
@@ -1141,7 +1138,7 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
u32 msg_id, off;
void *data;
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
close_sess(sess);
@@ -1198,7 +1195,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
u32 imm_type, imm_payload;
int err;
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
rtrs_err(s,
"%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
@@ -1218,21 +1215,20 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
return;
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
close_sess(sess);
break;
}
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
- if (likely(imm_type == RTRS_IO_REQ_IMM)) {
+ if (imm_type == RTRS_IO_REQ_IMM) {
u32 msg_id, off;
void *data;
msg_id = imm_payload >> sess->mem_bits;
off = imm_payload & ((1 << sess->mem_bits) - 1);
- if (unlikely(msg_id >= srv->queue_depth ||
- off >= max_chunk_size)) {
+ if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
rtrs_err(s, "Wrong msg_id %u, off %u\n",
msg_id, off);
close_sess(sess);
@@ -1244,7 +1240,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
mr->msg_off = off;
mr->msg_id = msg_id;
err = rtrs_srv_inv_rkey(con, mr);
- if (unlikely(err)) {
+ if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n",
err);
close_sess(sess);
@@ -1268,10 +1264,11 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
+ * and heartbeats (hb).
*/
- atomic_add(srv->queue_depth, &con->sq_wr_avail);
+ atomic_add(s->signal_interval, &con->c.sq_wr_avail);
- if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
+ if (!list_empty_careful(&con->rsp_wr_wait_list))
rtrs_rdma_process_wr_wait_list(con);
break;
@@ -1648,7 +1645,7 @@ static int create_con(struct rtrs_srv_sess *sess,
con->c.cm_id = cm_id;
con->c.sess = &sess->s;
con->c.cid = cid;
- atomic_set(&con->wr_cnt, 1);
+ atomic_set(&con->c.wr_cnt, 1);
wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
@@ -1659,6 +1656,8 @@ static int create_con(struct rtrs_srv_sess *sess,
max_send_wr = min_t(int, wr_limit,
SERVICE_CON_QUEUE_DEPTH * 2 + 2);
max_recv_wr = max_send_wr;
+ s->signal_interval = min_not_zero(srv->queue_depth,
+ (size_t)SERVICE_CON_QUEUE_DEPTH);
} else {
/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
if (always_invalidate)
@@ -1679,7 +1678,7 @@ static int create_con(struct rtrs_srv_sess *sess,
*/
}
cq_num = max_send_wr + max_recv_wr;
- atomic_set(&con->sq_wr_avail, max_send_wr);
+ atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_vector = rtrs_srv_get_next_cq_vector(sess);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
@@ -1894,7 +1893,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
err = create_con(sess, cm_id, cid);
if (err) {
rtrs_err((&sess->s), "create_con(), error %d\n", err);
- (void)rtrs_rdma_do_reject(cm_id, err);
+ rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
* through workqueue, but still return an error to tell cma.c
@@ -1905,7 +1904,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
err = rtrs_rdma_do_accept(sess, cm_id);
if (err) {
rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err);
- (void)rtrs_rdma_do_reject(cm_id, err);
+ rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
* session we follow normal way through workqueue to close the
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index f8da2e3f0bda..9d8d2a91a235 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -42,8 +42,6 @@ struct rtrs_srv_stats {
struct rtrs_srv_con {
struct rtrs_con c;
- atomic_t wr_cnt;
- atomic_t sq_wr_avail;
struct list_head rsp_wr_wait_list;
spinlock_t rsp_wr_wait_lock;
};
@@ -140,10 +138,6 @@ static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
char *page, size_t len);
-int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
- bool enable);
-int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
- size_t len);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 61919ebd92b2..ca542e477d38 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -182,22 +182,28 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
-int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
- u32 imm_data, enum ib_send_flags flags,
- struct ib_send_wr *head)
+static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
+ struct ib_cqe *cqe,
+ u32 imm_data,
+ struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
+ struct rtrs_sess *sess = con->sess;
+ enum ib_send_flags sflags;
+
+ atomic_dec_if_positive(&con->sq_wr_avail);
+ sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+ 0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
.wr.wr_cqe = cqe,
- .wr.send_flags = flags,
+ .wr.send_flags = sflags,
.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
.wr.ex.imm_data = cpu_to_be32(imm_data),
};
return rtrs_post_send(con->qp, head, &wr.wr, NULL);
}
-EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
static void qp_event_handler(struct ib_event *ev, void *ctx)
{
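
Making rtrs_post_rdma_write_imm_empty() static and moving the signaling decision inside it means heartbeats and acks now share the same wr_cnt/sq_wr_avail bookkeeping as I/O. The message itself is a zero-length RDMA WRITE whose 32-bit immediate is the entire payload; a sketch in plain verbs terms, hypothetical name:

static int post_imm_only_sketch(struct ib_qp *qp, struct ib_cqe *cqe,
				u32 imm, enum ib_send_flags flags)
{
	struct ib_rdma_wr wr = {
		.wr.wr_cqe	= cqe,
		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
		.wr.send_flags	= flags,
		.wr.ex.imm_data	= cpu_to_be32(imm),	/* the whole message */
	};

	return ib_post_send(qp, &wr.wr, NULL);
}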
@@ -314,8 +320,9 @@ void rtrs_send_hb_ack(struct rtrs_sess *sess)
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- 0, NULL);
+ NULL);
if (err) {
+ rtrs_err(sess, "send HB ACK failed, errno: %d\n", err);
sess->hb_err_handler(usr_con);
return;
}
@@ -333,6 +340,7 @@ static void hb_work(struct work_struct *work)
usr_con = sess->con[0];
if (sess->hb_missed_cnt > sess->hb_missed_max) {
+ rtrs_err(sess, "HB missed max reached.\n");
sess->hb_err_handler(usr_con);
return;
}
@@ -346,8 +354,9 @@ static void hb_work(struct work_struct *work)
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- 0, NULL);
+ NULL);
if (err) {
+ rtrs_err(sess, "HB send failed, errno: %d\n", err);
sess->hb_err_handler(usr_con);
return;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8d5cf5eb5778..71eda91e810c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1280,7 +1280,7 @@ static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
{
struct srp_terminate_context *context = context_ptr;
struct srp_target_port *target = context->srp_target;
- u32 tag = blk_mq_unique_tag(scmnd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
struct srp_request *req = scsi_cmd_priv(scmnd);
@@ -2152,6 +2152,7 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
+ struct request *rq = scsi_cmd_to_rq(scmnd);
struct srp_target_port *target = host_to_target(shost);
struct srp_rdma_ch *ch;
struct srp_request *req = scsi_cmd_priv(scmnd);
@@ -2166,8 +2167,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
if (unlikely(scmnd->result))
goto err;
- WARN_ON_ONCE(scmnd->request->tag < 0);
- tag = blk_mq_unique_tag(scmnd->request);
+ WARN_ON_ONCE(rq->tag < 0);
+ tag = blk_mq_unique_tag(rq);
ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
spin_lock_irqsave(&ch->lock, flags);
@@ -2791,7 +2792,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
if (!req)
return SUCCESS;
- tag = blk_mq_unique_tag(scmnd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
return SUCCESS;
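
All three srp call sites switch from dereferencing scmnd->request to the scsi_cmd_to_rq() accessor, which keeps working when the SCSI core stops embedding the request pointer in the command. The resulting tag-lookup idiom, as a hypothetical wrapper:

static u32 srp_cmd_tag_sketch(struct scsi_cmnd *scmnd)
{
	return blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
}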
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f798922a4598..882c3c8ba399 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -28,10 +28,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static bool use_ktime = true;
-module_param(use_ktime, bool, 0400);
-MODULE_PARM_DESC(use_ktime, "Use ktime for measuring I/O speed");
-
/*
* Option parsing.
*/
@@ -110,7 +106,6 @@ struct analog_port {
char cooked;
int bads;
int reads;
- int speed;
int loop;
int fuzz;
int axes[4];
@@ -120,66 +115,6 @@ struct analog_port {
};
/*
- * Time macros.
- */
-
-#ifdef __i386__
-
-#include <linux/i8253.h>
-
-#define GET_TIME(x) do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME (boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
-static unsigned int get_time_pit(void)
-{
- unsigned long flags;
- unsigned int count;
-
- raw_spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x00, 0x43);
- count = inb_p(0x40);
- count |= inb_p(0x40) << 8;
- raw_spin_unlock_irqrestore(&i8253_lock, flags);
-
- return count;
-}
-#elif defined(__x86_64__)
-#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_PPC) || defined(CONFIG_RISCV)
-#define GET_TIME(x) do { x = get_cycles(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "get_cycles"
-#else
-#define FAKE_TIME
-static unsigned long analog_faketime = 0;
-#define GET_TIME(x) do { x = analog_faketime++; } while(0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "Unreliable"
-#warning Precise timer not defined for this architecture.
-#endif
-
-static inline u64 get_time(void)
-{
- if (use_ktime) {
- return ktime_get_ns();
- } else {
- unsigned int x;
- GET_TIME(x);
- return x;
- }
-}
-
-static inline unsigned int delta(u64 x, u64 y)
-{
- if (use_ktime)
- return y - x;
- else
- return DELTA((unsigned int)x, (unsigned int)y);
-}
-
-/*
* analog_decode() decodes analog joystick data and reports input events.
*/
@@ -234,18 +169,18 @@ static void analog_decode(struct analog *analog, int *axes, int *initial, int bu
static int analog_cooked_read(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
- u64 time[4], start, loop, now;
+ ktime_t time[4], start, loop, now;
unsigned int loopout, timeout;
unsigned char data[4], this, last;
unsigned long flags;
int i, j;
loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
- timeout = ANALOG_MAX_TIME * port->speed;
+ timeout = ANALOG_MAX_TIME * NSEC_PER_MSEC;
local_irq_save(flags);
gameport_trigger(gameport);
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
start = now;
@@ -258,16 +193,16 @@ static int analog_cooked_read(struct analog_port *port)
local_irq_disable();
this = gameport_read(gameport) & port->mask;
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
- if ((last ^ this) && (delta(loop, now) < loopout)) {
+ if ((last ^ this) && (ktime_sub(now, loop) < loopout)) {
data[i] = last ^ this;
time[i] = now;
i++;
}
- } while (this && (i < 4) && (delta(start, now) < timeout));
+ } while (this && (i < 4) && (ktime_sub(now, start) < timeout));
this <<= 4;
@@ -275,7 +210,7 @@ static int analog_cooked_read(struct analog_port *port)
this |= data[i];
for (j = 0; j < 4; j++)
if (data[i] & (1 << j))
- port->axes[j] = (delta(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop;
+ port->axes[j] = ((u32)ktime_sub(time[i], start) << ANALOG_FUZZ_BITS) / port->loop;
}
return -(this != port->mask);
@@ -375,38 +310,22 @@ static void analog_calibrate_timer(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
unsigned int i, t, tx;
- u64 t1, t2, t3;
+ ktime_t t1, t2, t3;
unsigned long flags;
- if (use_ktime) {
- port->speed = 1000000;
- } else {
- local_irq_save(flags);
- t1 = get_time();
-#ifdef FAKE_TIME
- analog_faketime += 830;
-#endif
- mdelay(1);
- t2 = get_time();
- t3 = get_time();
- local_irq_restore(flags);
-
- port->speed = delta(t1, t2) - delta(t2, t3);
- }
-
tx = ~0;
for (i = 0; i < 50; i++) {
local_irq_save(flags);
- t1 = get_time();
+ t1 = ktime_get();
for (t = 0; t < 50; t++) {
gameport_read(gameport);
- t2 = get_time();
+ t2 = ktime_get();
}
- t3 = get_time();
+ t3 = ktime_get();
local_irq_restore(flags);
udelay(i);
- t = delta(t1, t2) - delta(t2, t3);
+ t = ktime_sub(t2, t1) - ktime_sub(t3, t2);
if (t < tx) tx = t;
}
@@ -611,7 +530,7 @@ static int analog_init_port(struct gameport *gameport, struct gameport_driver *d
t = gameport_read(gameport);
msleep(ANALOG_MAX_TIME);
port->mask = (gameport_read(gameport) ^ t) & t & 0xf;
- port->fuzz = (port->speed * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
+ port->fuzz = (NSEC_PER_MSEC * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
for (i = 0; i < ANALOG_INIT_RETRIES; i++) {
if (!analog_cooked_read(port))
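
With the architecture-specific GET_TIME()/DELTA() macros gone, every measurement in this driver is a ktime_get() pair plus nanosecond arithmetic. A minimal standalone sketch of the pattern the driver now uses (function names illustrative):

#include <linux/ktime.h>

/* Sketch: time an I/O operation with ktime, as analog_cooked_read() now does. */
static u64 measure_loop_ns(void (*io_op)(void))
{
	ktime_t start, end;

	start = ktime_get();
	io_op();
	end = ktime_get();

	return ktime_to_ns(ktime_sub(end, start));
}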
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 40a070a2e7f5..e75650e98c9e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -210,7 +210,7 @@ config KEYBOARD_LKKBD
select SERIO
help
Say Y here if you want to use a LK201 or LK401 style serial
- keyboard. This keyboard is also useable on PCs if you attach
+ keyboard. This keyboard is also usable on PCs if you attach
it with the inputattach program. The connector pinout is
described within lkkbd.c.
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index 6d5be48d1b3d..bf72ab8df817 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -193,7 +193,7 @@ static const struct of_device_id adc_keys_of_match[] = {
MODULE_DEVICE_TABLE(of, adc_keys_of_match);
#endif
-static struct platform_driver __refdata adc_keys_driver = {
+static struct platform_driver adc_keys_driver = {
.driver = {
.name = "adc_keys",
.of_match_table = of_match_ptr(adc_keys_of_match),
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 90a59b973d00..1592da4de336 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 654e0476406b..bdd264459a97 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/input/adp5589.h>
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8194333d612..e0e931e796fa 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -157,7 +157,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
keypad->enabled = true;
}
@@ -169,7 +169,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
struct ep93xx_keypad *keypad = input_get_drvdata(pdev);
if (keypad->enabled) {
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
keypad->enabled = false;
}
}
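
clk_enable() is only legal on a clock that has already been prepared; switching the open/close paths to clk_prepare_enable()/clk_disable_unprepare() keeps the prepare and enable counts paired. A minimal sketch of the pattern (error handling shown for illustration; the keypad code ignores the return value):

#include <linux/clk.h>

/* Sketch: prepare+enable on open, undo both on close. */
static int example_open(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;
	return 0;
}

static void example_close(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* drop enable count, then prepare count */
}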
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 62ccfebf2f60..c1a4d5055de6 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -316,11 +316,9 @@ static int __init hil_probe_chip(struct parisc_device *dev)
return hil_keyb_init();
}
-static int __exit hil_remove_chip(struct parisc_device *dev)
+static void __exit hil_remove_chip(struct parisc_device *dev)
{
hil_keyb_exit();
-
- return 0;
}
static const struct parisc_device_id hil_tbl[] __initconst = {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 498cde376981..dd5227cf8696 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -309,18 +309,6 @@ config INPUT_GPIO_VIBRA
To compile this driver as a module, choose M here: the module will be
called gpio-vibra.
-config INPUT_IXP4XX_BEEPER
- tristate "IXP4XX Beeper support"
- depends on ARCH_IXP4XX
- help
- If you say yes here, you can connect a beeper to the
- ixp4xx gpio pins. This is used by the LinkSys NSLU2.
-
- If unsure, say Y.
-
- To compile this driver as a module, choose M here: the
- module will be called ixp4xx-beeper.
-
config INPUT_COBALT_BTNS
tristate "Cobalt button interface"
depends on MIPS_COBALT
@@ -811,16 +799,6 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
-config INPUT_SIRFSOC_ONKEY
- tristate "CSR SiRFSoC power on/off/suspend key support"
- depends on ARCH_SIRF && OF
- default y
- help
- Say Y here if you want to support for the SiRFSoC power on/off/suspend key
- in Linux, after you press the onkey, system will suspend.
-
- If unsure, say N.
-
config INPUT_IDEAPAD_SLIDEBAR
tristate "IdeaPad Laptop Slidebar"
depends on INPUT
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f593beed7e05..b92c53a6b5ae 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
-obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
@@ -74,7 +73,6 @@ obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
-obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
deleted file mode 100644
index 05018d0c97c7..000000000000
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic IXP4xx beeper driver
- *
- * Copyright (C) 2005 Tower Technologies
- *
- * based on nslu2-io.c
- * Copyright (C) 2004 Karen Spearel
- *
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- * Maintainers: http://www.nslu2-linux.org/
- */
-
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <mach/hardware.h>
-
-MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
-MODULE_DESCRIPTION("ixp4xx beeper driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ixp4xx-beeper");
-
-static DEFINE_SPINLOCK(beep_lock);
-
-static int ixp4xx_timer2_irq;
-
-static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&beep_lock, flags);
-
- if (count) {
- gpio_direction_output(pin, 0);
- *IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE;
- } else {
- gpio_direction_output(pin, 1);
- gpio_direction_input(pin);
- *IXP4XX_OSRT2 = 0;
- }
-
- spin_unlock_irqrestore(&beep_lock, flags);
-}
-
-static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- unsigned int pin = (unsigned int) input_get_drvdata(dev);
- unsigned int count = 0;
-
- if (type != EV_SND)
- return -1;
-
- switch (code) {
- case SND_BELL:
- if (value)
- value = 1000;
- case SND_TONE:
- break;
- default:
- return -1;
- }
-
- if (value > 20 && value < 32767)
- count = (ixp4xx_timer_freq / (value * 4)) - 1;
-
- ixp4xx_spkr_control(pin, count);
-
- return 0;
-}
-
-static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
-{
- unsigned int pin = (unsigned int) dev_id;
-
- /* clear interrupt */
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND;
-
- /* flip the beeper output */
- gpio_set_value(pin, !gpio_get_value(pin));
-
- return IRQ_HANDLED;
-}
-
-static int ixp4xx_spkr_probe(struct platform_device *dev)
-{
- struct input_dev *input_dev;
- int irq;
- int err;
-
- input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
-
- input_set_drvdata(input_dev, (void *) dev->id);
-
- input_dev->name = "ixp4xx beeper";
- input_dev->phys = "ixp4xx/gpio";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x001f;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_SND);
- input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
- input_dev->event = ixp4xx_spkr_event;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- err = irq;
- goto err_free_device;
- }
-
- err = gpio_request(dev->id, "ixp4-beeper");
- if (err)
- goto err_free_device;
-
- err = request_irq(irq, &ixp4xx_spkr_interrupt,
- IRQF_NO_SUSPEND, "ixp4xx-beeper",
- (void *) dev->id);
- if (err)
- goto err_free_gpio;
- ixp4xx_timer2_irq = irq;
-
- err = input_register_device(input_dev);
- if (err)
- goto err_free_irq;
-
- platform_set_drvdata(dev, input_dev);
-
- return 0;
-
- err_free_irq:
- free_irq(irq, (void *)dev->id);
- err_free_gpio:
- gpio_free(dev->id);
- err_free_device:
- input_free_device(input_dev);
-
- return err;
-}
-
-static int ixp4xx_spkr_remove(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- input_unregister_device(input_dev);
-
- /* turn the speaker off */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-
- free_irq(ixp4xx_timer2_irq, (void *)dev->id);
- gpio_free(dev->id);
-
- return 0;
-}
-
-static void ixp4xx_spkr_shutdown(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- /* turn off the speaker */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-}
-
-static struct platform_driver ixp4xx_spkr_platform_driver = {
- .driver = {
- .name = "ixp4xx-beeper",
- },
- .probe = ixp4xx_spkr_probe,
- .remove = ixp4xx_spkr_remove,
- .shutdown = ixp4xx_spkr_shutdown,
-};
-module_platform_driver(ixp4xx_spkr_platform_driver);
-
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 10e3fc0eac6e..33609603245d 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -284,7 +284,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
if (pwrkey->data->supports_ps_hold_poff_config) {
- pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify,
+ pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify;
error = register_reboot_notifier(&pwrkey->reboot_notifier);
if (error) {
dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
deleted file mode 100644
index 7982bf8fb839..000000000000
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Power key driver for SiRF PrimaII
- *
- * Copyright (c) 2013 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/rtc/sirfsoc_rtciobrg.h>
-#include <linux/of.h>
-#include <linux/workqueue.h>
-
-struct sirfsoc_pwrc_drvdata {
- u32 pwrc_base;
- struct input_dev *input;
- struct delayed_work work;
-};
-
-#define PWRC_ON_KEY_BIT (1 << 0)
-
-#define PWRC_INT_STATUS 0xc
-#define PWRC_INT_MASK 0x10
-#define PWRC_PIN_STATUS 0x14
-#define PWRC_KEY_DETECT_UP_TIME 20 /* ms*/
-
-static int sirfsoc_pwrc_is_on_key_down(struct sirfsoc_pwrc_drvdata *pwrcdrv)
-{
- u32 state = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_PIN_STATUS);
- return !(state & PWRC_ON_KEY_BIT); /* ON_KEY is active low */
-}
-
-static void sirfsoc_pwrc_report_event(struct work_struct *work)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv =
- container_of(work, struct sirfsoc_pwrc_drvdata, work.work);
-
- if (sirfsoc_pwrc_is_on_key_down(pwrcdrv)) {
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
- } else {
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 0);
- input_sync(pwrcdrv->input);
- }
-}
-
-static irqreturn_t sirfsoc_pwrc_isr(int irq, void *dev_id)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_id;
- u32 int_status;
-
- int_status = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_INT_STATUS);
- sirfsoc_rtc_iobrg_writel(int_status & ~PWRC_ON_KEY_BIT,
- pwrcdrv->pwrc_base + PWRC_INT_STATUS);
-
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 1);
- input_sync(pwrcdrv->input);
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
-
- return IRQ_HANDLED;
-}
-
-static void sirfsoc_pwrc_toggle_interrupts(struct sirfsoc_pwrc_drvdata *pwrcdrv,
- bool enable)
-{
- u32 int_mask;
-
- int_mask = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base + PWRC_INT_MASK);
- if (enable)
- int_mask |= PWRC_ON_KEY_BIT;
- else
- int_mask &= ~PWRC_ON_KEY_BIT;
- sirfsoc_rtc_iobrg_writel(int_mask, pwrcdrv->pwrc_base + PWRC_INT_MASK);
-}
-
-static int sirfsoc_pwrc_open(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
-
- return 0;
-}
-
-static void sirfsoc_pwrc_close(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
- cancel_delayed_work_sync(&pwrcdrv->work);
-}
-
-static const struct of_device_id sirfsoc_pwrc_of_match[] = {
- { .compatible = "sirf,prima2-pwrc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
-
-static int sirfsoc_pwrc_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct sirfsoc_pwrc_drvdata *pwrcdrv;
- int irq;
- int error;
-
- pwrcdrv = devm_kzalloc(&pdev->dev, sizeof(struct sirfsoc_pwrc_drvdata),
- GFP_KERNEL);
- if (!pwrcdrv) {
- dev_info(&pdev->dev, "Not enough memory for the device data\n");
- return -ENOMEM;
- }
-
- /*
- * We can't use of_iomap because pwrc is not mapped in memory,
- * the so-called base address is only offset in rtciobrg
- */
- error = of_property_read_u32(np, "reg", &pwrcdrv->pwrc_base);
- if (error) {
- dev_err(&pdev->dev,
- "unable to find base address of pwrc node in dtb\n");
- return error;
- }
-
- pwrcdrv->input = devm_input_allocate_device(&pdev->dev);
- if (!pwrcdrv->input)
- return -ENOMEM;
-
- pwrcdrv->input->name = "sirfsoc pwrckey";
- pwrcdrv->input->phys = "pwrc/input0";
- pwrcdrv->input->evbit[0] = BIT_MASK(EV_KEY);
- input_set_capability(pwrcdrv->input, EV_KEY, KEY_POWER);
-
- INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event);
-
- pwrcdrv->input->open = sirfsoc_pwrc_open;
- pwrcdrv->input->close = sirfsoc_pwrc_close;
-
- input_set_drvdata(pwrcdrv->input, pwrcdrv);
-
- /* Make sure the device is quiesced */
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
-
- irq = platform_get_irq(pdev, 0);
- error = devm_request_irq(&pdev->dev, irq,
- sirfsoc_pwrc_isr, 0,
- "sirfsoc_pwrc_int", pwrcdrv);
- if (error) {
- dev_err(&pdev->dev, "unable to claim irq %d, error: %d\n",
- irq, error);
- return error;
- }
-
- error = input_register_device(pwrcdrv->input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device, error: %d\n",
- error);
- return error;
- }
-
- dev_set_drvdata(&pdev->dev, pwrcdrv);
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
- struct input_dev *input = pwrcdrv->input;
-
- /*
- * Do not mask pwrc interrupt as we want pwrc work as a wakeup source
- * if users touch X_ONKEY_B, see arch/arm/mach-prima2/pm.c
- */
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
- mutex_unlock(&input->mutex);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
-
-static struct platform_driver sirfsoc_pwrc_driver = {
- .probe = sirfsoc_pwrc_probe,
- .driver = {
- .name = "sirfsoc-pwrc",
- .pm = &sirfsoc_pwrc_pm_ops,
- .of_match_table = sirfsoc_pwrc_of_match,
- }
-};
-
-module_platform_driver(sirfsoc_pwrc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
-MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
-MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index dc4a240f4489..3c84deefa327 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -55,8 +55,9 @@
#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
-#define ETP_PRODUCT_ID_DELBIN 0x00C2
+#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
#define ETP_PRODUCT_ID_VOXEL 0x00BF
+#define ETP_PRODUCT_ID_DELBIN 0x00C2
#define ETP_PRODUCT_ID_MAGPIE 0x0120
#define ETP_PRODUCT_ID_BOBBA 0x0121
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index dad22c1ea6a0..47af62c12267 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
u32 quirks;
} elan_i2c_quirks[] = {
{ 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
{ 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 2f9775de3c5b..a9065c6ab550 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -411,7 +411,7 @@ fail_nomem:
* @return: success/error report
*/
-static int __exit gscps2_remove(struct parisc_device *dev)
+static void __exit gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
@@ -425,7 +425,6 @@ static int __exit gscps2_remove(struct parisc_device *dev)
#endif
dev_set_drvdata(&dev->dev, NULL);
kfree(ps2port);
- return 0;
}
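
As with hil_remove_chip() above, parisc bus remove() callbacks were converted tree-wide to return void, since the driver core never acted on the value. A minimal sketch of the new callback shape (the names and teardown body are illustrative):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/parisc-device.h>	/* struct parisc_device (parisc only) */

/* Sketch: parisc remove() callbacks now return void; teardown only. */
static void __exit example_remove(struct parisc_device *dev)
{
	void *priv = dev_get_drvdata(&dev->dev);

	dev_set_drvdata(&dev->dev, NULL);
	kfree(priv);
}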
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 3ac57a91ede4..51b68501896c 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -220,16 +220,4 @@ static struct parport_driver parkbd_parport_driver = {
.detach = parkbd_detach,
.devmodel = true,
};
-
-static int __init parkbd_init(void)
-{
- return parport_register_driver(&parkbd_parport_driver);
-}
-
-static void __exit parkbd_exit(void)
-{
- parport_unregister_driver(&parkbd_parport_driver);
-}
-
-module_init(parkbd_init);
-module_exit(parkbd_exit);
+module_parport_driver(parkbd_parport_driver);
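
module_parport_driver() folds the init/exit boilerplate into one line; roughly, it expands to the code this hunk just deleted (simplified, the real macro also wires up THIS_MODULE via __parport_register_driver()):

/* Simplified expansion of module_parport_driver(parkbd_parport_driver): */
static int __init parkbd_parport_driver_init(void)
{
	return parport_register_driver(&parkbd_parport_driver);
}
module_init(parkbd_parport_driver_init);

static void __exit parkbd_parport_driver_exit(void)
{
	parport_unregister_driver(&parkbd_parport_driver);
}
module_exit(parkbd_parport_driver_exit);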
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ad454cd2855a..d4e74738c5a8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -932,7 +932,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
- Elo TouchSystems 2700 IntelliTouch
- - EasyTouch USB Touch Controller from Data Modul
+ - EasyTouch USB Touch Controller from Data Module
- e2i (Mimo monitors)
Have a look at <http://linux.chapter7.ch/touchkit/> for
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 263de3bfb6cd..bb2e1cbffba7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -899,6 +899,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
* the identification registers.
*/
switch (rdbuf[0]) {
+ case 0x11: /* EDT EP0110M09 */
case 0x35: /* EDT EP0350M09 */
case 0x43: /* EDT EP0430M09 */
case 0x50: /* EDT EP0500M09 */
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 0efd1a1bb192..9fa3b0e421be 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
+ TYPE_MMS134S = 134,
TYPE_MMS136 = 136,
TYPE_MMS152 = 152,
TYPE_MMS345L = 345,
@@ -212,7 +213,7 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
goto out;
- /* MMS136 has slightly different event size */
- if (data->type == TYPE_MMS136)
+ /* MMS134S and MMS136 have slightly different event size */
+ if (data->type == TYPE_MMS134S || data->type == TYPE_MMS136)
touch_size = packet_size / MMS136_EVENT_SIZE;
else
touch_size = packet_size / MMS114_EVENT_SIZE;
@@ -281,6 +282,7 @@ static int mms114_get_version(struct mms114_data *data)
break;
case TYPE_MMS114:
+ case TYPE_MMS134S:
case TYPE_MMS136:
error = __mms114_read_reg(data, MMS114_TSP_REV, 6, buf);
if (error)
@@ -304,8 +306,9 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* Only MMS114 and MMS136 have configuration and power on registers */
- if (data->type != TYPE_MMS114 && data->type != TYPE_MMS136)
+ /* MMS114, MMS134S and MMS136 have configuration and power on registers */
+ if (data->type != TYPE_MMS114 && data->type != TYPE_MMS134S &&
+ data->type != TYPE_MMS136)
return 0;
error = mms114_set_active(data, true);
@@ -487,7 +490,8 @@ static int mms114_probe(struct i2c_client *client,
0, data->props.max_y, 0, 0);
}
- if (data->type == TYPE_MMS114 || data->type == TYPE_MMS136) {
+ if (data->type == TYPE_MMS114 || data->type == TYPE_MMS134S ||
+ data->type == TYPE_MMS136) {
/*
* The firmware handles movement and pressure fuzz, so
* don't duplicate that in software.
@@ -612,6 +616,9 @@ static const struct of_device_id mms114_dt_match[] = {
.compatible = "melfas,mms114",
.data = (void *)TYPE_MMS114,
}, {
+ .compatible = "melfas,mms134s",
+ .data = (void *)TYPE_MMS134S,
+ }, {
.compatible = "melfas,mms136",
.data = (void *)TYPE_MMS136,
}, {
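
probe matches the compatible string and reads the controller type back out of the of_device_id .data cookie. A minimal sketch of that lookup, assuming the usual of_device_get_match_data() pattern (the driver's actual probe code is outside this hunk):

#include <linux/of_device.h>

/* Sketch: recover the enum mms_type encoded in of_device_id.data. */
static enum mms_type example_get_type(struct device *dev)
{
	return (enum mms_type)(uintptr_t)of_device_get_match_data(dev);
}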
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f61516c17589..124c41adeca1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -79,16 +79,57 @@ config IOMMU_DEBUGFS
debug/iommu directory, and then populate a subdirectory with
entries as required.
-config IOMMU_DEFAULT_PASSTHROUGH
- bool "IOMMU passthrough by default"
+choice
+ prompt "IOMMU default domain type"
depends on IOMMU_API
+ default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64
+ default IOMMU_DEFAULT_DMA_STRICT
help
- Enable passthrough by default, removing the need to pass in
- iommu.passthrough=on or iommu=pt through command line. If this
- is enabled, you can still disable with iommu.passthrough=off
- or iommu=nopt depending on the architecture.
+ Choose the type of IOMMU domain used to manage DMA API usage by
+ device drivers. The options here typically represent different
+ levels of tradeoff between robustness/security and performance,
+ depending on the IOMMU driver. Not all IOMMUs support all options.
+ This choice can be overridden at boot via the command line, and for
+ some devices also at runtime via sysfs.
- If unsure, say N here.
+ If unsure, keep the default.
+
+config IOMMU_DEFAULT_DMA_STRICT
+ bool "Translated - Strict"
+ help
+ Trusted devices use translation to restrict their access to only
+ DMA-mapped pages, with strict TLB invalidation on unmap. Equivalent
+ to passing "iommu.passthrough=0 iommu.strict=1" on the command line.
+
+ Untrusted devices always use this mode, with an additional layer of
+ bounce-buffering such that they cannot gain access to any unrelated
+ data within a mapped page.
+
+config IOMMU_DEFAULT_DMA_LAZY
+ bool "Translated - Lazy"
+ help
+ Trusted devices use translation to restrict their access to only
+ DMA-mapped pages, but with "lazy" batched TLB invalidation. This
+ mode allows higher performance with some IOMMUs due to reduced TLB
+ flushing, but at the cost of reduced isolation since devices may be
+ able to access memory for some time after it has been unmapped.
+ Equivalent to passing "iommu.passthrough=0 iommu.strict=0" on the
+ command line.
+
+ If this mode is not supported by the IOMMU driver, the effective
+ runtime default will fall back to IOMMU_DEFAULT_DMA_STRICT.
+
+config IOMMU_DEFAULT_PASSTHROUGH
+ bool "Passthrough"
+ help
+ Trusted devices are identity-mapped, giving them unrestricted access
+ to memory with minimal performance overhead. Equivalent to passing
+ "iommu.passthrough=1" (historically "iommu=pt") on the command line.
+
+ If this mode is not supported by the IOMMU driver, the effective
+ runtime default will fall back to IOMMU_DEFAULT_DMA_STRICT.
+
+endchoice
config OF_IOMMU
def_bool y
@@ -249,6 +290,20 @@ config SPAPR_TCE_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
+config APPLE_DART
+ tristate "Apple DART IOMMU Support"
+ depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ default ARCH_APPLE
+ help
+ Support for Apple DART (Device Address Resolution Table) IOMMUs
+ found in Apple ARM SoCs like the M1.
+ This IOMMU is required for most peripherals using DMA to access
+ the main memory.
+
+ Say Y here if you are using an Apple SoC.
+
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c0fb0ba88143..bc7f730edbb0 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
+obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 94c1a7a9876d..8dbe61e2b3c1 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -779,12 +779,6 @@ extern u16 amd_iommu_last_bdf;
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
-/*
- * If true, the addresses will be flushed on unmap time, not when
- * they are reused
- */
-extern bool amd_iommu_unmap_flush;
-
/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 46280e6e1535..2a822b229bd0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -161,7 +161,6 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -298,6 +297,22 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+#ifdef CONFIG_IRQ_REMAP
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ bool ret = false;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ ret = iommu_feature(iommu, mask);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+#endif
+
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
@@ -814,9 +829,9 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
return 0;
}
-#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
+#ifdef CONFIG_IRQ_REMAP
u64 entry;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
@@ -846,25 +861,9 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
-static int iommu_init_ga(struct amd_iommu *iommu)
-{
- int ret = 0;
-
-#ifdef CONFIG_IRQ_REMAP
- /* Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !iommu_feature(iommu, FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- ret = iommu_init_ga_log(iommu);
+#else
+ return 0;
#endif /* CONFIG_IRQ_REMAP */
-
- return ret;
}
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
@@ -1846,12 +1845,15 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga(iommu);
+ ret = iommu_init_ga_log(iommu);
if (ret)
return ret;
- if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
+ pr_info("Using strict mode due to virtualization\n");
+ iommu_set_dma_strict();
amd_iommu_np_cache = true;
+ }
init_iommu_perf_ctr(iommu);
@@ -2477,6 +2479,14 @@ static void early_enable_iommus(void)
}
#ifdef CONFIG_IRQ_REMAP
+ /*
+ * Note: We have already checked GASup from IVRS table.
+ * Now, we need to make sure that GAMSup is set.
+ */
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
@@ -3098,8 +3108,10 @@ static int __init parse_amd_iommu_intr(char *str)
static int __init parse_amd_iommu_options(char *str)
{
for (; *str; ++str) {
- if (strncmp(str, "fullflush", 9) == 0)
- amd_iommu_unmap_flush = true;
+ if (strncmp(str, "fullflush", 9) == 0) {
+ pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
+ iommu_set_dma_strict();
+ }
if (strncmp(str, "force_enable", 12) == 0)
amd_iommu_force_enable = true;
if (strncmp(str, "off", 3) == 0)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index bb0ee5c9fde7..182c93a43efd 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -493,9 +493,6 @@ static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo
unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
- if (pgtable->mode == PAGE_MODE_NONE)
- return iova;
-
pte = fetch_pte(pgtable, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 811a49a95d04..1722bb161841 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -425,9 +425,11 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
- if (dev_data && __ratelimit(&dev_data->rs)) {
- pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
- vmg_tag, spa, flags);
+ if (dev_data) {
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ vmg_tag, spa, flags);
+ }
} else {
pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -456,9 +458,11 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
- if (dev_data && __ratelimit(&dev_data->rs)) {
- pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
- vmg_tag, gpa, flags_rmp, flags);
+ if (dev_data) {
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ vmg_tag, gpa, flags_rmp, flags);
+ }
} else {
pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -480,11 +484,13 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
- if (dev_data && __ratelimit(&dev_data->rs)) {
- pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
- domain_id, address, flags);
- } else if (printk_ratelimit()) {
- pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ if (dev_data) {
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ domain_id, address, flags);
+ }
+ } else {
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domain_id, address, flags);
}
@@ -1261,15 +1267,52 @@ static void __domain_flush_pages(struct protection_domain *domain,
}
static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size)
+ u64 address, size_t size, int pde)
{
- __domain_flush_pages(domain, address, size, 0);
+ if (likely(!amd_iommu_np_cache)) {
+ __domain_flush_pages(domain, address, size, pde);
+ return;
+ }
+
+ /*
+ * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
+ * In such setups it is best to avoid flushes of ranges which are not
+ * naturally aligned, since it would lead to flushes of unmodified
+ * PTEs. Such flushes would require the hypervisor to do more work than
+ * necessary. Therefore, perform repeated flushes of aligned ranges
+ * until you cover the range. Each iteration flushes the smaller
+ * between the natural alignment of the address that we flush and the
+ * greatest naturally aligned region that fits in the range.
+ */
+ while (size != 0) {
+ int addr_alignment = __ffs(address);
+ int size_alignment = __fls(size);
+ int min_alignment;
+ size_t flush_size;
+
+ /*
+ * size is always non-zero, but address might be zero, causing
+ * addr_alignment to be negative. As the casting of the
+ * argument in __ffs(address) to long might trim the high bits
+ * of the address on x86-32, cast to long when doing the check.
+ */
+ if (likely((unsigned long)address != 0))
+ min_alignment = min(addr_alignment, size_alignment);
+ else
+ min_alignment = size_alignment;
+
+ flush_size = 1ul << min_alignment;
+
+ __domain_flush_pages(domain, address, flush_size, pde);
+ address += flush_size;
+ size -= flush_size;
+ }
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+ domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1296,7 +1339,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size);
+ domain_flush_pages(domain, iova, size, 1);
amd_iommu_domain_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags);
}
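
The alignment loop above is easiest to follow with numbers: flushing iova 0x1000 with size 0x3000 first flushes 0x1000 bytes at 0x1000 (limited by the address alignment), then 0x2000 bytes at 0x2000, never touching unmodified PTEs. A standalone userspace sketch of the same split (GCC builtins stand in for __ffs()/__fls(); assumes 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x1000, size = 0x3000;

	while (size != 0) {
		int addr_alignment = __builtin_ctzl(address);	/* ~__ffs */
		int size_alignment = 63 - __builtin_clzl(size);	/* ~__fls */
		int min_alignment;
		unsigned long flush_size;

		min_alignment = address ? (addr_alignment < size_alignment ?
				addr_alignment : size_alignment) : size_alignment;
		flush_size = 1UL << min_alignment;

		printf("flush 0x%lx bytes at 0x%lx\n", flush_size, address);
		address += flush_size;
		size -= flush_size;
	}
	return 0;
}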
@@ -1707,14 +1750,9 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
static void amd_iommu_probe_finalize(struct device *dev)
{
- struct iommu_domain *domain;
-
/* Domains are initialized for this device - have a look what we ended up with */
- domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_DMA)
- iommu_setup_dma_ops(dev, 0, U64_MAX);
- else
- set_dma_ops(dev, NULL);
+ set_dma_ops(dev, NULL);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
}
static void amd_iommu_release_device(struct device *dev)
@@ -1775,12 +1813,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
static void __init amd_iommu_init_dma_ops(void)
{
swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-
- if (amd_iommu_unmap_flush)
- pr_info("IO/TLB flush on unmap enabled\n");
- else
- pr_info("Lazy IO/TLB flushing enabled\n");
- iommu_set_dma_strict(amd_iommu_unmap_flush);
}
int __init amd_iommu_init_api(void)
@@ -1924,16 +1956,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
domain->domain.geometry.aperture_end = ~0ULL;
domain->domain.geometry.force_aperture = true;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
- goto free_domain;
-
return &domain->domain;
-
-free_domain:
- protection_domain_free(domain);
-
- return NULL;
}
static void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -1950,9 +1973,6 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
if (!dom)
return;
- if (dom->type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&domain->domain);
-
if (domain->flags & PD_IOMMUV2_MASK)
free_gcr3_table(domain);
@@ -2022,6 +2042,16 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return ret;
}
+static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+ unsigned long iova, size_t size)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+
+ if (ops->map)
+ domain_flush_np_cache(domain, iova, size);
+}
+
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot,
gfp_t gfp)
@@ -2040,26 +2070,50 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- if (ops->map) {
+ if (ops->map)
ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
- domain_flush_np_cache(domain, iova, page_size);
- }
return ret;
}
+static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ /*
+ * AMD's IOMMU can flush as many pages as necessary in a single flush.
+ * Unless we run in a virtual machine, which can be inferred according
+ * to whether "non-present cache" is on, it is probably best to prefer
+ * (potentially) too extensive TLB flushing (i.e., more misses) over
+ * multiple TLB flushes (i.e., more flushes). For virtual machines the
+ * hypervisor needs to synchronize the host IOMMU PTEs with those of
+ * the guest, and the trade-off is different: unnecessary TLB flushes
+ * should be avoided.
+ */
+ if (amd_iommu_np_cache &&
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
+
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
+
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t page_size,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ size_t r;
if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
- return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+ r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+
+ return r;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
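
iommu_iotlb_gather_is_disjoint() was added alongside this series; the intent is that a new page either extends the pending range or, in the np_cache case, forces a sync first so that no single flush ever spans untouched PTEs in between. A rough standalone sketch of the disjointness test (my reading of the helper; treat the exact boundary arithmetic as an assumption):

#include <linux/types.h>

/*
 * Sketch: a new range [iova, iova+size) is "disjoint" from the gathered
 * range [g_start, g_end] when it neither overlaps nor directly abuts it.
 * The boundary arithmetic is an assumption, not a copy of the helper.
 */
static bool gather_is_disjoint_sketch(unsigned long g_start,
				      unsigned long g_end,
				      unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return g_end != 0 && (end + 1 < g_start || start > g_end + 1);
}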
@@ -2162,7 +2216,13 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- amd_iommu_flush_iotlb_all(domain);
+ struct protection_domain *dom = to_pdomain(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
}
static int amd_iommu_def_domain_type(struct device *dev)
@@ -2191,6 +2251,7 @@ const struct iommu_ops amd_iommu_ops = {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.probe_device = amd_iommu_probe_device,
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index f8d4ad421e07..a9e568276c99 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "AMD-Vi: " fmt
+#include <linux/refcount.h>
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
@@ -33,7 +34,7 @@ struct pri_queue {
struct pasid_state {
struct list_head list; /* For global state-list */
- atomic_t count; /* Reference count */
+ refcount_t count; /* Reference count */
unsigned mmu_notifier_count; /* Counting nested mmu_notifier
calls */
struct mm_struct *mm; /* mm_struct for the faults */
@@ -242,7 +243,7 @@ static struct pasid_state *get_pasid_state(struct device_state *dev_state,
ret = *ptr;
if (ret)
- atomic_inc(&ret->count);
+ refcount_inc(&ret->count);
out_unlock:
spin_unlock_irqrestore(&dev_state->lock, flags);
@@ -257,14 +258,14 @@ static void free_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state(struct pasid_state *pasid_state)
{
- if (atomic_dec_and_test(&pasid_state->count))
+ if (refcount_dec_and_test(&pasid_state->count))
wake_up(&pasid_state->wq);
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- atomic_dec(&pasid_state->count);
- wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
+ refcount_dec(&pasid_state->count);
+ wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -624,7 +625,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
goto out;
- atomic_set(&pasid_state->count, 1);
+ refcount_set(&pasid_state->count, 1);
init_waitqueue_head(&pasid_state->wq);
spin_lock_init(&pasid_state->lock);
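
refcount_t behaves like atomic_t for the common inc/dec-and-test pattern but saturates and warns on overflow and underflow instead of silently wrapping, which is the point of this conversion. A minimal lifecycle sketch (the struct is illustrative):

#include <linux/refcount.h>

struct obj {
	refcount_t count;
};

/* Sketch: typical refcount_t lifecycle, as pasid_state now uses. */
static void obj_init(struct obj *o)
{
	refcount_set(&o->count, 1);		/* initial reference */
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->count);		/* WARNs and saturates on overflow */
}

static bool obj_put(struct obj *o)
{
	return refcount_dec_and_test(&o->count);	/* true when last ref dropped */
}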
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
new file mode 100644
index 000000000000..559db9259e65
--- /dev/null
+++ b/drivers/iommu/apple-dart.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple DART (Device Address Resolution Table) IOMMU driver
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ *
+ * Based on arm/arm-smmu/arm-smmu.c and arm/arm-smmu-v3/arm-smmu-v3.c
+ * Copyright (C) 2013 ARM Limited
+ * Copyright (C) 2015 ARM Limited
+ * and on exynos-iommu.c
+ * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+#include <linux/types.h>
+
+#define DART_MAX_STREAMS 16
+#define DART_MAX_TTBR 4
+#define MAX_DARTS_PER_DEVICE 2
+
+#define DART_STREAM_ALL 0xffff
+
+#define DART_PARAMS1 0x00
+#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)
+
+#define DART_PARAMS2 0x04
+#define DART_PARAMS_BYPASS_SUPPORT BIT(0)
+
+#define DART_STREAM_COMMAND 0x20
+#define DART_STREAM_COMMAND_BUSY BIT(2)
+#define DART_STREAM_COMMAND_INVALIDATE BIT(20)
+
+#define DART_STREAM_SELECT 0x34
+
+#define DART_ERROR 0x40
+#define DART_ERROR_STREAM GENMASK(27, 24)
+#define DART_ERROR_CODE GENMASK(11, 0)
+#define DART_ERROR_FLAG BIT(31)
+
+#define DART_ERROR_READ_FAULT BIT(4)
+#define DART_ERROR_WRITE_FAULT BIT(3)
+#define DART_ERROR_NO_PTE BIT(2)
+#define DART_ERROR_NO_PMD BIT(1)
+#define DART_ERROR_NO_TTBR BIT(0)
+
+#define DART_CONFIG 0x60
+#define DART_CONFIG_LOCK BIT(15)
+
+#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
+
+#define DART_ERROR_ADDR_HI 0x54
+#define DART_ERROR_ADDR_LO 0x50
+
+#define DART_TCR(sid) (0x100 + 4 * (sid))
+#define DART_TCR_TRANSLATE_ENABLE BIT(7)
+#define DART_TCR_BYPASS0_ENABLE BIT(8)
+#define DART_TCR_BYPASS1_ENABLE BIT(12)
+
+#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
+#define DART_TTBR_VALID BIT(31)
+#define DART_TTBR_SHIFT 12
+
+/*
+ * Private structure associated with each DART device.
+ *
+ * @dev: device struct
+ * @regs: mapped MMIO region
+ * @irq: interrupt number, can be shared with other DARTs
+ * @clks: clocks associated with this DART
+ * @num_clks: number of @clks
+ * @lock: lock for hardware operations involving this dart
+ * @pgsize: pagesize supported by this DART
+ * @supports_bypass: indicates if this DART supports bypass mode
+ * @force_bypass: force bypass mode due to pagesize mismatch?
+ * @sid2group: maps stream ids to iommu_groups
+ * @iommu: iommu core device
+ */
+struct apple_dart {
+ struct device *dev;
+
+ void __iomem *regs;
+
+ int irq;
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ spinlock_t lock;
+
+ u32 pgsize;
+ u32 supports_bypass : 1;
+ u32 force_bypass : 1;
+
+ struct iommu_group *sid2group[DART_MAX_STREAMS];
+ struct iommu_device iommu;
+};
+
+/*
+ * Convenience struct to identify streams.
+ *
+ * The normal variant is used inside apple_dart_master_cfg which isn't written
+ * to concurrently.
+ * The atomic variant is used inside apple_dart_domain where we have to guard
+ * against races from potential parallel calls to attach/detach_device.
+ * Note that even inside the atomic variant the apple_dart pointer is not
+ * protected: This pointer is initialized once under the domain init mutex
+ * and never changed again afterwards. Devices with different dart pointers
+ * cannot be attached to the same domain.
+ *
+ * @dart dart pointer
+ * @sid stream id bitmap
+ */
+struct apple_dart_stream_map {
+ struct apple_dart *dart;
+ unsigned long sidmap;
+};
+struct apple_dart_atomic_stream_map {
+ struct apple_dart *dart;
+ atomic64_t sidmap;
+};
+
+/*
+ * This structure is attached to each iommu domain handled by a DART.
+ *
+ * @pgtbl_ops: pagetable ops allocated by io-pgtable
+ * @finalized: true if the domain has been completely initialized
+ * @init_lock: protects domain initialization
+ * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only)
+ * @domain: core iommu domain pointer
+ */
+struct apple_dart_domain {
+ struct io_pgtable_ops *pgtbl_ops;
+
+ bool finalized;
+ struct mutex init_lock;
+ struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
+
+ struct iommu_domain domain;
+};
+
+/*
+ * This structure is attached to devices with dev_iommu_priv_set() on of_xlate
+ * and contains a list of streams bound to this device.
+ * So far the worst case seen is a single device with two streams
+ * from different darts, such that this simple static array is enough.
+ *
+ * @streams: streams for this device
+ */
+struct apple_dart_master_cfg {
+ struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
+};
+
+/*
+ * Helper macro to iterate over apple_dart_master_cfg.stream_maps and
+ * apple_dart_domain.stream_maps
+ *
+ * @i int used as loop variable
+ * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain)
+ * @stream_map pointer to the apple_dart_stream_map struct for each loop iteration
+ */
+#define for_each_stream_map(i, base, stream_map) \
+ for (i = 0, stream_map = &(base)->stream_maps[0]; \
+ i < MAX_DARTS_PER_DEVICE && stream_map->dart; \
+ stream_map = &(base)->stream_maps[++i])
+
+static struct platform_driver apple_dart_driver;
+static const struct iommu_ops apple_dart_iommu_ops;
+static const struct iommu_flush_ops apple_dart_tlb_ops;
+
+static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct apple_dart_domain, domain);
+}
+
+static void
+apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+{
+ int sid;
+
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ writel(DART_TCR_TRANSLATE_ENABLE,
+ stream_map->dart->regs + DART_TCR(sid));
+}
+
+static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
+{
+ int sid;
+
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ writel(0, stream_map->dart->regs + DART_TCR(sid));
+}
+
+static void
+apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
+{
+ int sid;
+
+ WARN_ON(!stream_map->dart->supports_bypass);
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
+ stream_map->dart->regs + DART_TCR(sid));
+}
+
+static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
+ u8 idx, phys_addr_t paddr)
+{
+ int sid;
+
+ WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
+ stream_map->dart->regs + DART_TTBR(sid, idx));
+}
+
+static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
+ u8 idx)
+{
+ int sid;
+
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
+}
+
+static void
+apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
+{
+ int i;
+
+ for (i = 0; i < DART_MAX_TTBR; ++i)
+ apple_dart_hw_clear_ttbr(stream_map, i);
+}
+
+static int
+apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+ u32 command)
+{
+ unsigned long flags;
+ int ret;
+ u32 command_reg;
+
+ spin_lock_irqsave(&stream_map->dart->lock, flags);
+
+ writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
+ writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);
+
+ ret = readl_poll_timeout_atomic(
+ stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
+ !(command_reg & DART_STREAM_COMMAND_BUSY), 1,
+ DART_STREAM_COMMAND_BUSY_TIMEOUT);
+
+ spin_unlock_irqrestore(&stream_map->dart->lock, flags);
+
+ if (ret) {
+ dev_err(stream_map->dart->dev,
+ "busy bit did not clear after command %x for streams %lx\n",
+ command, stream_map->sidmap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+{
+ return apple_dart_hw_stream_command(stream_map,
+ DART_STREAM_COMMAND_INVALIDATE);
+}
+
+static int apple_dart_hw_reset(struct apple_dart *dart)
+{
+ u32 config;
+ struct apple_dart_stream_map stream_map;
+
+ config = readl(dart->regs + DART_CONFIG);
+ if (config & DART_CONFIG_LOCK) {
+ dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
+ config);
+ return -EINVAL;
+ }
+
+ stream_map.dart = dart;
+ stream_map.sidmap = DART_STREAM_ALL;
+ apple_dart_hw_disable_dma(&stream_map);
+ apple_dart_hw_clear_all_ttbrs(&stream_map);
+
+ /* clear any pending errors before the interrupt is unmasked */
+ writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
+
+ return apple_dart_hw_invalidate_tlb(&stream_map);
+}
+
+static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
+{
+ int i;
+ struct apple_dart_atomic_stream_map *domain_stream_map;
+ struct apple_dart_stream_map stream_map;
+
+ for_each_stream_map(i, domain, domain_stream_map) {
+ stream_map.dart = domain_stream_map->dart;
+ stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
+ apple_dart_hw_invalidate_tlb(&stream_map);
+ }
+}
+
+static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
+{
+ apple_dart_domain_flush_tlb(to_dart_domain(domain));
+}
+
+static void apple_dart_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ apple_dart_domain_flush_tlb(to_dart_domain(domain));
+}
+
+static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ apple_dart_domain_flush_tlb(to_dart_domain(domain));
+}
+
+static void apple_dart_tlb_flush_all(void *cookie)
+{
+ apple_dart_domain_flush_tlb(cookie);
+}
+
+static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ apple_dart_domain_flush_tlb(cookie);
+}
+
+static const struct iommu_flush_ops apple_dart_tlb_ops = {
+ .tlb_flush_all = apple_dart_tlb_flush_all,
+ .tlb_flush_walk = apple_dart_tlb_flush_walk,
+};
+
+static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+ struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ return ops->iova_to_phys(ops, iova);
+}
+
+static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot, gfp_t gfp,
+ size_t *mapped)
+{
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+ struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
+
+ if (!ops)
+ return -ENODEV;
+
+ return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,
+ mapped);
+}
+
+static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
+ unsigned long iova, size_t pgsize,
+ size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+ struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
+
+ return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
+}
+
+static void
+apple_dart_setup_translation(struct apple_dart_domain *domain,
+ struct apple_dart_stream_map *stream_map)
+{
+ int i;
+ struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
+
+ for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
+ apple_dart_hw_set_ttbr(stream_map, i,
+ pgtbl_cfg->apple_dart_cfg.ttbr[i]);
+ for (; i < DART_MAX_TTBR; ++i)
+ apple_dart_hw_clear_ttbr(stream_map, i);
+
+ apple_dart_hw_enable_translation(stream_map);
+ apple_dart_hw_invalidate_tlb(stream_map);
+}
+
+static int apple_dart_finalize_domain(struct iommu_domain *domain,
+ struct apple_dart_master_cfg *cfg)
+{
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+ struct apple_dart *dart = cfg->stream_maps[0].dart;
+ struct io_pgtable_cfg pgtbl_cfg;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&dart_domain->init_lock);
+
+ if (dart_domain->finalized)
+ goto done;
+
+ for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
+ atomic64_set(&dart_domain->stream_maps[i].sidmap,
+ cfg->stream_maps[i].sidmap);
+ }
+
+ pgtbl_cfg = (struct io_pgtable_cfg){
+ .pgsize_bitmap = dart->pgsize,
+ .ias = 32,
+ .oas = 36,
+ .coherent_walk = 1,
+ .tlb = &apple_dart_tlb_ops,
+ .iommu_dev = dart->dev,
+ };
+
+ dart_domain->pgtbl_ops =
+ alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
+ if (!dart_domain->pgtbl_ops) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->geometry.force_aperture = true;
+
+ dart_domain->finalized = true;
+
+done:
+ mutex_unlock(&dart_domain->init_lock);
+ return ret;
+}
+
+static int
+apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
+ struct apple_dart_stream_map *master_maps,
+ bool add_streams)
+{
+ int i;
+
+ for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ if (domain_maps[i].dart != master_maps[i].dart)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ if (!domain_maps[i].dart)
+ break;
+ if (add_streams)
+ atomic64_or(master_maps[i].sidmap,
+ &domain_maps[i].sidmap);
+ else
+ atomic64_and(~master_maps[i].sidmap,
+ &domain_maps[i].sidmap);
+ }
+
+ return 0;
+}
+
+static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
+ struct apple_dart_master_cfg *cfg)
+{
+ return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
+ true);
+}
+
+static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
+ struct apple_dart_master_cfg *cfg)
+{
+ return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
+ false);
+}
+
+static int apple_dart_attach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret, i;
+ struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+
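+ /*
+ * force_bypass (DART page > CPU page) permits only identity domains;
+ * without bypass support, identity domains are impossible.
+ */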
+ if (cfg->stream_maps[0].dart->force_bypass &&
+ domain->type != IOMMU_DOMAIN_IDENTITY)
+ return -EINVAL;
+ if (!cfg->stream_maps[0].dart->supports_bypass &&
+ domain->type == IOMMU_DOMAIN_IDENTITY)
+ return -EINVAL;
+
+ ret = apple_dart_finalize_domain(domain, cfg);
+ if (ret)
+ return ret;
+
+ switch (domain->type) {
+ case IOMMU_DOMAIN_DMA:
+ case IOMMU_DOMAIN_UNMANAGED:
+ ret = apple_dart_domain_add_streams(dart_domain, cfg);
+ if (ret)
+ return ret;
+
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_setup_translation(dart_domain, stream_map);
+ break;
+ case IOMMU_DOMAIN_BLOCKED:
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_hw_disable_dma(stream_map);
+ break;
+ case IOMMU_DOMAIN_IDENTITY:
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_hw_enable_bypass(stream_map);
+ break;
+ }
+
+ return ret;
+}
+
+static void apple_dart_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int i;
+ struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_hw_disable_dma(stream_map);
+
+ if (domain->type == IOMMU_DOMAIN_DMA ||
+ domain->type == IOMMU_DOMAIN_UNMANAGED)
+ apple_dart_domain_remove_streams(dart_domain, cfg);
+}
+
+static struct iommu_device *apple_dart_probe_device(struct device *dev)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_stream_map *stream_map;
+ int i;
+
+ if (!cfg)
+ return ERR_PTR(-ENODEV);
+
+ for_each_stream_map(i, cfg, stream_map)
+ device_link_add(
+ dev, stream_map->dart->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+ return &cfg->stream_maps[0].dart->iommu;
+}
+
+static void apple_dart_release_device(struct device *dev)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+
+ if (!cfg)
+ return;
+
+ dev_iommu_priv_set(dev, NULL);
+ kfree(cfg);
+}
+
+static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
+{
+ struct apple_dart_domain *dart_domain;
+
+ if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
+ return NULL;
+
+ dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
+ if (!dart_domain)
+ return NULL;
+
+ iommu_get_dma_cookie(&dart_domain->domain);
+ mutex_init(&dart_domain->init_lock);
+
+ /* no need to allocate pgtbl_ops or do any other finalization steps */
+ if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
+ dart_domain->finalized = true;
+
+ return &dart_domain->domain;
+}
+
+static void apple_dart_domain_free(struct iommu_domain *domain)
+{
+ struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+
+ if (dart_domain->pgtbl_ops)
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
+
+ kfree(dart_domain);
+}
+
+static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+ struct apple_dart *dart = platform_get_drvdata(iommu_pdev);
+ struct apple_dart *cfg_dart;
+ int i, sid;
+
+ if (args->args_count != 1)
+ return -EINVAL;
+ sid = args->args[0];
+
+ if (!cfg)
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+ dev_iommu_priv_set(dev, cfg);
+
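+ /* All DARTs attached to the same master must behave identically */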
+ cfg_dart = cfg->stream_maps[0].dart;
+ if (cfg_dart) {
+ if (cfg_dart->supports_bypass != dart->supports_bypass)
+ return -EINVAL;
+ if (cfg_dart->force_bypass != dart->force_bypass)
+ return -EINVAL;
+ if (cfg_dart->pgsize != dart->pgsize)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ if (cfg->stream_maps[i].dart == dart) {
+ cfg->stream_maps[i].sidmap |= 1 << sid;
+ return 0;
+ }
+ }
+ for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ if (!cfg->stream_maps[i].dart) {
+ cfg->stream_maps[i].dart = dart;
+ cfg->stream_maps[i].sidmap = 1 << sid;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct iommu_group *apple_dart_device_group(struct device *dev)
+{
+ static DEFINE_MUTEX(lock);
+ int i, sid;
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_stream_map *stream_map;
+ struct iommu_group *group = NULL;
+ struct iommu_group *res = ERR_PTR(-EINVAL);
+
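+ /*
+ * sid2group remembers the group previously chosen for each stream;
+ * every SID of this master must agree before a group can be reused.
+ */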
+ mutex_lock(&lock);
+
+ for_each_stream_map(i, cfg, stream_map) {
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
+ struct iommu_group *stream_group =
+ stream_map->dart->sid2group[sid];
+
+ if (group && group != stream_group) {
+ res = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ group = stream_group;
+ }
+ }
+
+ if (group) {
+ res = iommu_group_ref_get(group);
+ goto out;
+ }
+
+#ifdef CONFIG_PCI
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+#endif
+ group = generic_device_group(dev);
+
+ for_each_stream_map(i, cfg, stream_map)
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ stream_map->dart->sid2group[sid] = group;
+
+ res = group;
+
+out:
+ mutex_unlock(&lock);
+ return res;
+}
+
+static int apple_dart_def_domain_type(struct device *dev)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+
+ if (cfg->stream_maps[0].dart->force_bypass)
+ return IOMMU_DOMAIN_IDENTITY;
+ if (!cfg->stream_maps[0].dart->supports_bypass)
+ return IOMMU_DOMAIN_DMA;
+
+ return 0;
+}
+
+static const struct iommu_ops apple_dart_iommu_ops = {
+ .domain_alloc = apple_dart_domain_alloc,
+ .domain_free = apple_dart_domain_free,
+ .attach_dev = apple_dart_attach_dev,
+ .detach_dev = apple_dart_detach_dev,
+ .map_pages = apple_dart_map_pages,
+ .unmap_pages = apple_dart_unmap_pages,
+ .flush_iotlb_all = apple_dart_flush_iotlb_all,
+ .iotlb_sync = apple_dart_iotlb_sync,
+ .iotlb_sync_map = apple_dart_iotlb_sync_map,
+ .iova_to_phys = apple_dart_iova_to_phys,
+ .probe_device = apple_dart_probe_device,
+ .release_device = apple_dart_release_device,
+ .device_group = apple_dart_device_group,
+ .of_xlate = apple_dart_of_xlate,
+ .def_domain_type = apple_dart_def_domain_type,
+ .pgsize_bitmap = -1UL, /* Restricted during dart probe */
+};
+
+static irqreturn_t apple_dart_irq(int irq, void *dev)
+{
+ struct apple_dart *dart = dev;
+ const char *fault_name = NULL;
+ u32 error = readl(dart->regs + DART_ERROR);
+ u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
+ u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
+ u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
+ u64 addr = addr_lo | (((u64)addr_hi) << 32);
+ u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
+
+ if (!(error & DART_ERROR_FLAG))
+ return IRQ_NONE;
+
+ /* there should only be a single bit set but let's use == to be sure */
+ if (error_code == DART_ERROR_READ_FAULT)
+ fault_name = "READ FAULT";
+ else if (error_code == DART_ERROR_WRITE_FAULT)
+ fault_name = "WRITE FAULT";
+ else if (error_code == DART_ERROR_NO_PTE)
+ fault_name = "NO PTE FOR IOVA";
+ else if (error_code == DART_ERROR_NO_PMD)
+ fault_name = "NO PMD FOR IOVA";
+ else if (error_code == DART_ERROR_NO_TTBR)
+ fault_name = "NO TTBR FOR IOVA";
+ else
+ fault_name = "unknown";
+
+ dev_err_ratelimited(
+ dart->dev,
+ "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
+ error, stream_idx, error_code, fault_name, addr);
+
+ writel(error, dart->regs + DART_ERROR);
+ return IRQ_HANDLED;
+}
+
+static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
+{
+ int ret;
+
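+ /* Claim each bus type unless another IOMMU driver already has */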
+ if (!iommu_present(&platform_bus_type)) {
+ ret = bus_set_iommu(&platform_bus_type, ops);
+ if (ret)
+ return ret;
+ }
+#ifdef CONFIG_PCI
+ if (!iommu_present(&pci_bus_type)) {
+ ret = bus_set_iommu(&pci_bus_type, ops);
+ if (ret) {
+ bus_set_iommu(&platform_bus_type, NULL);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int apple_dart_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 dart_params[2];
+ struct resource *res;
+ struct apple_dart *dart;
+ struct device *dev = &pdev->dev;
+
+ dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL);
+ if (!dart)
+ return -ENOMEM;
+
+ dart->dev = dev;
+ spin_lock_init(&dart->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (resource_size(res) < 0x4000) {
+ dev_err(dev, "MMIO region too small (%pr)\n", res);
+ return -EINVAL;
+ }
+
+ dart->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dart->regs))
+ return PTR_ERR(dart->regs);
+
+ dart->irq = platform_get_irq(pdev, 0);
+ if (dart->irq < 0)
+ return -ENODEV;
+
+ ret = devm_clk_bulk_get_all(dev, &dart->clks);
+ if (ret < 0)
+ return ret;
+ dart->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(dart->num_clks, dart->clks);
+ if (ret)
+ return ret;
+
+ ret = apple_dart_hw_reset(dart);
+ if (ret)
+ goto err_clk_disable;
+
+ dart_params[0] = readl(dart->regs + DART_PARAMS1);
+ dart_params[1] = readl(dart->regs + DART_PARAMS2);
+ dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
+ dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
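+ /* A DART page larger than the CPU page cannot back DMA domains */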
+ dart->force_bypass = dart->pgsize > PAGE_SIZE;
+
+ ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
+ "apple-dart fault handler", dart);
+ if (ret)
+ goto err_clk_disable;
+
+ platform_set_drvdata(pdev, dart);
+
+ ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
+ if (ret)
+ goto err_free_irq;
+
+ ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
+ dev_name(&pdev->dev));
+ if (ret)
+ goto err_remove_bus_ops;
+
+ ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
+ if (ret)
+ goto err_sysfs_remove;
+
+ dev_info(
+ &pdev->dev,
+ "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
+ dart->pgsize, dart->supports_bypass, dart->force_bypass);
+ return 0;
+
+err_sysfs_remove:
+ iommu_device_sysfs_remove(&dart->iommu);
+err_remove_bus_ops:
+ apple_dart_set_bus_ops(NULL);
+err_free_irq:
+ free_irq(dart->irq, dart);
+err_clk_disable:
+ clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
+
+ return ret;
+}
+
+static int apple_dart_remove(struct platform_device *pdev)
+{
+ struct apple_dart *dart = platform_get_drvdata(pdev);
+
+ apple_dart_hw_reset(dart);
+ free_irq(dart->irq, dart);
+ apple_dart_set_bus_ops(NULL);
+
+ iommu_device_unregister(&dart->iommu);
+ iommu_device_sysfs_remove(&dart->iommu);
+
+ clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
+
+ return 0;
+}
+
+static const struct of_device_id apple_dart_of_match[] = {
+ { .compatible = "apple,t8103-dart", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_dart_of_match);
+
+static struct platform_driver apple_dart_driver = {
+ .driver = {
+ .name = "apple-dart",
+ .of_match_table = apple_dart_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = apple_dart_probe,
+ .remove = apple_dart_remove,
+};
+
+module_platform_driver(apple_dart_driver);
+
+MODULE_DESCRIPTION("IOMMU API for Apple's DART");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 235f9bdaeaf2..a388e318f86e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -335,10 +335,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
return 0;
}
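+/*
+ * Trivial today; kept as a helper so a later change can pick between
+ * several command queues (e.g. an ECMDQ per CPU) without touching callers.
+ */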
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+{
+ return &smmu->cmdq;
+}
+
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
- u32 prod)
+ struct arm_smmu_queue *q, u32 prod)
{
- struct arm_smmu_queue *q = &smmu->cmdq.q;
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
};
@@ -355,7 +359,8 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
arm_smmu_cmdq_build_cmd(cmd, &ent);
}
-static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
+static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q)
{
static const char * const cerror_str[] = {
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
@@ -366,7 +371,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
int i;
u64 cmd[CMDQ_ENT_DWORDS];
- struct arm_smmu_queue *q = &smmu->cmdq.q;
u32 cons = readl_relaxed(q->cons_reg);
u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
struct arm_smmu_cmdq_ent cmd_sync = {
@@ -413,6 +417,11 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
+static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
+{
+ __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
+}
+
/*
* Command queue locking.
* This is a form of bastardised rwlock with the following major changes:
@@ -579,7 +588,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
{
unsigned long flags;
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
int ret = 0;
/*
@@ -595,7 +604,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
queue_poll_init(smmu, &qp);
do {
- llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+ llq->val = READ_ONCE(cmdq->q.llq.val);
if (!queue_full(llq))
break;
@@ -614,7 +623,7 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
{
int ret = 0;
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
queue_poll_init(smmu, &qp);
@@ -637,12 +646,12 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
struct arm_smmu_ll_queue *llq)
{
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 prod = llq->prod;
int ret = 0;
queue_poll_init(smmu, &qp);
- llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+ llq->val = READ_ONCE(cmdq->q.llq.val);
do {
if (queue_consumed(llq, prod))
break;
@@ -732,12 +741,12 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
u32 prod;
unsigned long flags;
bool owner;
- struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
- struct arm_smmu_ll_queue llq = {
- .max_n_shift = cmdq->q.llq.max_n_shift,
- }, head = llq;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+ struct arm_smmu_ll_queue llq, head;
int ret = 0;
+ llq.max_n_shift = cmdq->q.llq.max_n_shift;
+
/* 1. Allocate some space in the queue */
local_irq_save(flags);
llq.val = READ_ONCE(cmdq->q.llq.val);
@@ -772,7 +781,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
if (sync) {
prod = queue_inc_prod_n(&llq, n);
- arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
+ arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
/*
@@ -845,8 +854,9 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
return ret;
}
-static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq_ent *ent)
+static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent,
+ bool sync)
{
u64 cmd[CMDQ_ENT_DWORDS];
@@ -856,12 +866,19 @@ static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
return -EINVAL;
}
- return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+}
+
+static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
}
-static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
{
- return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
+ return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
}
static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -929,8 +946,7 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
.tlbi.asid = asid,
};
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
@@ -939,7 +955,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
size_t i;
unsigned long flags;
struct arm_smmu_master *master;
- struct arm_smmu_cmdq_batch cmds = {};
+ struct arm_smmu_cmdq_batch cmds;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
@@ -949,6 +965,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
},
};
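+ /* Only .num needs initialising; zeroing the whole batch is wasted effort */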
+ cmds.num = 0;
+
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
for (i = 0; i < master->num_streams; i++) {
@@ -1211,8 +1229,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
},
};
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
@@ -1747,15 +1764,16 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
{
int i;
struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_cmdq_batch cmds = {};
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
- arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
+ arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
}
- return arm_smmu_cmdq_issue_sync(master->smmu);
+ return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
}
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
@@ -1765,7 +1783,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long flags;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_master *master;
- struct arm_smmu_cmdq_batch cmds = {};
+ struct arm_smmu_cmdq_batch cmds;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
return 0;
@@ -1789,6 +1807,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+ cmds.num = 0;
+
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
if (!master->ats_enabled)
@@ -1823,8 +1843,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
} else {
cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
@@ -1837,7 +1856,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long end = iova + size, num_pages = 0, tg = 0;
size_t inv_range = granule;
- struct arm_smmu_cmdq_batch cmds = {};
+ struct arm_smmu_cmdq_batch cmds;
if (!size)
return;
@@ -1855,6 +1874,8 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
num_pages = size >> tg;
}
+ cmds.num = 0;
+
while (iova < end) {
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
/*
@@ -1972,6 +1993,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (type != IOMMU_DOMAIN_UNMANAGED &&
type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_DMA_FQ &&
type != IOMMU_DOMAIN_IDENTITY)
return NULL;
@@ -1984,12 +2006,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&smmu_domain->domain)) {
- kfree(smmu_domain);
- return NULL;
- }
-
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
@@ -2021,7 +2037,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- iommu_put_dma_cookie(domain);
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
/* Free the CD and ASID, if we allocated them */
@@ -2181,9 +2196,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
- if (!iommu_get_dma_strict(domain))
- pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops)
return -ENOMEM;
@@ -2439,19 +2451,21 @@ out_unlock:
return ret;
}
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
if (!ops)
return -ENODEV;
- return ops->map(ops, iova, paddr, size, prot, gfp);
+ return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
}
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -2459,7 +2473,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return 0;
- return ops->unmap(ops, iova, size, gather);
+ return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -2488,9 +2502,6 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
- if (domain->type == IOMMU_DOMAIN_IDENTITY)
- return iova;
-
if (!ops)
return 0;
@@ -2825,8 +2836,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .map = arm_smmu_map,
- .unmap = arm_smmu_unmap,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -3338,18 +3349,16 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Invalidate any cached configuration */
cmd.opcode = CMDQ_OP_CFGI_ALL;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
/* Invalidate any stale TLB entries */
if (smmu->features & ARM_SMMU_FEAT_HYP) {
cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 9b9d13ec5a88..55690af1b25d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -193,6 +193,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
{
struct adreno_smmu_priv *priv;
+ smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+
/* Only enable split pagetables for the GPU device (SID 0) */
if (!qcom_adreno_smmu_is_gpu_device(dev))
return 0;
@@ -235,6 +237,14 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ }
};
+static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
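+ /* Ranged walk invalidation is slow on these SoCs; a full ASID flush wins */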
+ smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+
+ return 0;
+}
+
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
@@ -358,6 +368,7 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
}
static const struct arm_smmu_impl qcom_smmu_impl = {
+ .init_context = qcom_smmu_init_context,
.cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index f22dbeb1e510..4bc75c4ce402 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -327,9 +327,16 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
- arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
- ARM_SMMU_CB_S1_TLBIVA);
- arm_smmu_tlb_sync_context(cookie);
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+ if (cfg->flush_walk_prefer_tlbiasid) {
+ arm_smmu_tlb_inv_context_s1(cookie);
+ } else {
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+ }
}
static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
@@ -765,9 +772,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
- if (!iommu_get_dma_strict(domain))
- pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
if (smmu->impl && smmu->impl->init_context) {
ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
if (ret)
@@ -868,10 +872,11 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
+ if (using_legacy_binding ||
+ (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_DMA_FQ))
+ return NULL;
+ }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -881,12 +886,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
- iommu_get_dma_cookie(&smmu_domain->domain))) {
- kfree(smmu_domain);
- return NULL;
- }
-
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
@@ -901,7 +900,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
- iommu_put_dma_cookie(domain);
arm_smmu_destroy_domain_context(domain);
kfree(smmu_domain);
}
@@ -1198,8 +1196,9 @@ rpm_put:
return ret;
}
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1209,14 +1208,15 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
arm_smmu_rpm_get(smmu);
- ret = ops->map(ops, iova, paddr, size, prot, gfp);
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
arm_smmu_rpm_put(smmu);
return ret;
}
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1226,7 +1226,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return 0;
arm_smmu_rpm_get(smmu);
- ret = ops->unmap(ops, iova, size, gather);
+ ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
arm_smmu_rpm_put(smmu);
return ret;
@@ -1320,9 +1320,6 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- if (domain->type == IOMMU_DOMAIN_IDENTITY)
- return iova;
-
if (!ops)
return 0;
@@ -1478,16 +1475,21 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
struct iommu_group *group = NULL;
int i, idx;
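+	/* Serialise group lookup/creation against concurrent probes */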
+ mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (group && smmu->s2crs[idx].group &&
- group != smmu->s2crs[idx].group)
+ group != smmu->s2crs[idx].group) {
+ mutex_unlock(&smmu->stream_map_mutex);
return ERR_PTR(-EINVAL);
+ }
group = smmu->s2crs[idx].group;
}
- if (group)
+ if (group) {
+ mutex_unlock(&smmu->stream_map_mutex);
return iommu_group_ref_get(group);
+ }
if (dev_is_pci(dev))
group = pci_device_group(dev);
@@ -1501,6 +1503,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
for_each_cfg_sme(cfg, fwspec, i, idx)
smmu->s2crs[idx].group = group;
+ mutex_unlock(&smmu->stream_map_mutex);
return group;
}
@@ -1582,8 +1585,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .map = arm_smmu_map,
- .unmap = arm_smmu_unmap,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -2281,18 +2284,38 @@ static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
+ int ret;
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
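+ /* Keep clocks prepared across system sleep; enable only when active */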
+ ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
+ if (ret)
+ return ret;
+
if (pm_runtime_suspended(dev))
return 0;
- return arm_smmu_runtime_resume(dev);
+ ret = arm_smmu_runtime_resume(dev);
+ if (ret)
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+
+ return ret;
}
static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
+ int ret = 0;
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
if (pm_runtime_suspended(dev))
- return 0;
+ goto clk_unprepare;
+
+ ret = arm_smmu_runtime_suspend(dev);
+ if (ret)
+ return ret;
- return arm_smmu_runtime_suspend(dev);
+clk_unprepare:
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+ return ret;
}
static const struct dev_pm_ops arm_smmu_pm_ops = {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index a50271595960..432de2f742c3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -346,6 +346,7 @@ struct arm_smmu_cfg {
};
enum arm_smmu_cbar_type cbar;
enum arm_smmu_context_fmt fmt;
+ bool flush_walk_prefer_tlbiasid;
};
#define ARM_SMMU_INVALID_IRPTNDX 0xff
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 021cf8f65ffc..b91874cb6cf3 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -10,7 +10,6 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -335,12 +334,6 @@ static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
if (!qcom_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&qcom_domain->domain)) {
- kfree(qcom_domain);
- return NULL;
- }
-
mutex_init(&qcom_domain->init_mutex);
spin_lock_init(&qcom_domain->pgtbl_lock);
@@ -351,8 +344,6 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
- iommu_put_dma_cookie(domain);
-
if (qcom_domain->iommu) {
/*
* NOTE: unmap can be called after client device is powered
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 6f0df629353f..896bea04c347 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -317,6 +317,30 @@ static bool dev_is_untrusted(struct device *dev)
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ int ret;
+
+ if (cookie->fq_domain)
+ return 0;
+
+ ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
+ iommu_dma_entry_dtor);
+ if (ret) {
+ pr_warn("iova flush queue initialization failed\n");
+ return ret;
+ }
+ /*
+ * Prevent incomplete iovad->fq being observable. Pairs with path from
+ * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+ */
+ smp_wmb();
+ WRITE_ONCE(cookie->fq_domain, domain);
+ return 0;
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -370,17 +394,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
init_iova_domain(iovad, 1UL << order, base_pfn);
- if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
- domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
- if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
- iommu_dma_entry_dtor))
- pr_warn("iova flush queue initialization failed\n");
- else
- cookie->fq_domain = domain;
- }
-
- if (!dev)
- return 0;
+ /* If the FQ fails we can simply fall back to strict mode */
+ if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
+ domain->type = IOMMU_DOMAIN_DMA;
return iova_reserve_iommu_regions(dev, domain);
}
@@ -455,17 +471,17 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
- dma_addr_t iova, size_t size, struct page *freelist)
+ dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
struct iova_domain *iovad = &cookie->iovad;
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
- else if (cookie->fq_domain) /* non-strict mode */
+ else if (gather && gather->queued)
queue_iova(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
- (unsigned long)freelist);
+ (unsigned long)gather->freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -484,13 +500,14 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
iommu_iotlb_gather_init(&iotlb_gather);
+ iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
WARN_ON(unmapped != size);
- if (!cookie->fq_domain)
+ if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather);
- iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+ iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
@@ -506,7 +523,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size);
- if (unlikely(is_swiotlb_buffer(phys)))
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
@@ -577,7 +594,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
}
@@ -784,7 +801,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
- if (is_swiotlb_buffer(phys))
+ if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
@@ -797,7 +814,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- if (is_swiotlb_buffer(phys))
+ if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev))
@@ -818,7 +835,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
- if (is_swiotlb_buffer(sg_phys(sg)))
+ if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir);
}
@@ -835,7 +852,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i) {
- if (is_swiotlb_buffer(sg_phys(sg)))
+ if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_device(dev, sg_phys(sg),
sg->length, dir);
@@ -973,7 +990,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
out_unmap:
iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
- return 0;
+ return -EIO;
}
/*
@@ -994,11 +1011,13 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
+ ssize_t ret;
int i;
- if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
- iommu_deferred_attach(dev, domain))
- return 0;
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+ ret = iommu_deferred_attach(dev, domain);
+ if (ret)
+ goto out;
+ }
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1046,14 +1065,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
}
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
- if (!iova)
+ if (!iova) {
+ ret = -ENOMEM;
goto out_restore_sg;
+ }
/*
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+ ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+ if (ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
@@ -1062,7 +1084,10 @@ out_free_iova:
iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
- return 0;
+out:
+ if (ret != -ENOMEM)
+ return -EINVAL;
+ return ret;
}
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -1322,7 +1347,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
* The IOMMU core code allocates the default DMA domain, which the
* underlying IOMMU driver needs to support via the dma-iommu layer.
*/
- if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_is_dma_domain(domain)) {
if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
goto out_err;
dev->dma_ops = &iommu_dma_ops;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index d0fbf1d10e18..939ffa768986 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
@@ -735,20 +734,16 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
/* Check if correct PTE offsets are initialized */
BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
+ if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA) {
- if (iommu_get_dma_cookie(&domain->domain) != 0)
- goto err_pgtable;
- } else if (type != IOMMU_DOMAIN_UNMANAGED) {
- goto err_pgtable;
- }
-
domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!domain->pgtable)
- goto err_dma_cookie;
+ goto err_pgtable;
domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!domain->lv2entcnt)
@@ -779,9 +774,6 @@ err_lv2ent:
free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
-err_dma_cookie:
- if (type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&domain->domain);
err_pgtable:
kfree(domain);
return NULL;
@@ -809,9 +801,6 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_unlock_irqrestore(&domain->lock, flags);
- if (iommu_domain->type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(iommu_domain);
-
dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
DMA_TO_DEVICE);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 43ebd8af11c5..0ddb77115be7 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -25,9 +25,11 @@ config INTEL_IOMMU
and include PCI device scope covered by these DMA
remapping devices.
+if INTEL_IOMMU
+
config INTEL_IOMMU_DEBUGFS
bool "Export Intel IOMMU internals in Debugfs"
- depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ depends on IOMMU_DEBUGFS
select DMAR_PERF
help
!!!WARNING!!!
@@ -41,7 +43,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
- depends on INTEL_IOMMU && X86_64
+ depends on X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
@@ -53,9 +55,8 @@ config INTEL_IOMMU_SVM
means of a Process Address Space ID (PASID).
config INTEL_IOMMU_DEFAULT_ON
- def_bool y
- prompt "Enable Intel DMA Remapping Devices by default"
- depends on INTEL_IOMMU
+ bool "Enable Intel DMA Remapping Devices by default"
+ default y
help
Selecting this option will enable a DMAR device at boot time if
one is found. If this option is not selected, DMAR support can
@@ -63,7 +64,7 @@ config INTEL_IOMMU_DEFAULT_ON
config INTEL_IOMMU_BROKEN_GFX_WA
bool "Workaround broken graphics drivers (going away soon)"
- depends on INTEL_IOMMU && BROKEN && X86
+ depends on BROKEN && X86
help
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
@@ -74,7 +75,7 @@ config INTEL_IOMMU_BROKEN_GFX_WA
config INTEL_IOMMU_FLOPPY_WA
def_bool y
- depends on INTEL_IOMMU && X86
+ depends on X86
help
Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
@@ -83,7 +84,7 @@ config INTEL_IOMMU_FLOPPY_WA
config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
bool "Enable Intel IOMMU scalable mode by default"
- depends on INTEL_IOMMU
+ default y
help
Selecting this option will enable by default the scalable mode if
hardware presents the capability. The scalable mode is defined in
@@ -92,3 +93,5 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
is not selected, scalable mode support could also be enabled by
passing intel_iommu=sm_on to the kernel. If not sure, please use
the default value.
+
+endif # INTEL_IOMMU
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index d66f79acd14d..0ec5514c9980 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -149,8 +149,6 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
} else {
info = kzalloc(size, GFP_KERNEL);
if (!info) {
- pr_warn("Out of memory when allocating notify_info "
- "for %s.\n", pci_name(dev));
if (dmar_dev_scope_status == 0)
dmar_dev_scope_status = -ENOMEM;
return NULL;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index dd22fc7d5176..d75f59ae28e6 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -33,6 +33,7 @@
#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/intel-iommu.h>
+#include <linux/intel-svm.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
@@ -85,24 +86,6 @@
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-/*
- * This bitmap is used to advertise the page sizes our hardware support
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
- */
-#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
-
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
@@ -345,23 +328,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova);
-#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
-
-#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
-int intel_iommu_sm = 1;
-#else
-int intel_iommu_sm;
-#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
+int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
-static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
@@ -454,14 +427,17 @@ static int __init intel_iommu_setup(char *str)
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
iommu_dma_forcedac = true;
} else if (!strncmp(str, "strict", 6)) {
- pr_info("Disable batched IOTLB flush\n");
- intel_iommu_strict = 1;
+ pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
+ iommu_set_dma_strict();
} else if (!strncmp(str, "sp_off", 6)) {
pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
} else if (!strncmp(str, "sm_on", 5)) {
- pr_info("Intel-IOMMU: scalable mode supported\n");
+ pr_info("Enable scalable mode if hardware supports\n");
intel_iommu_sm = 1;
+ } else if (!strncmp(str, "sm_off", 6)) {
+ pr_info("Scalable mode is disallowed\n");
+ intel_iommu_sm = 0;
} else if (!strncmp(str, "tboot_noforce", 13)) {
pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
@@ -601,7 +577,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
int iommu_id;
/* si_domain and vm domain should not get here. */
- if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+ if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
return NULL;
for_each_domain_iommu(iommu_id, domain)
@@ -736,6 +712,23 @@ static int domain_update_device_node(struct dmar_domain *domain)
static void domain_update_iotlb(struct dmar_domain *domain);
+/* Return the super pagesize bitmap if supported. */
+static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
+{
+ unsigned long bitmap = 0;
+
+ /*
+ * A 1-level super page supports a 2MiB page size; a 2-level super page
+ * supports both 2MiB and 1GiB.
+ */
+ if (domain->iommu_superpage == 1)
+ bitmap |= SZ_2M;
+ else if (domain->iommu_superpage == 2)
+ bitmap |= SZ_2M | SZ_1G;
+
+ return bitmap;
+}
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -762,6 +755,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+ domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
domain_update_iotlb(domain);
}
@@ -1035,7 +1029,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain_use_first_level(domain)) {
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
- if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ if (iommu_is_dma_domain(&domain->domain))
pteval |= DMA_FL_PTE_ACCESS;
}
if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -1548,7 +1542,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
if (info->pri_supported &&
(info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
- !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+ !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1;
#endif
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
@@ -1780,11 +1774,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
spin_lock_init(&iommu->lock);
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->domain_ids) {
- pr_err("%s: Allocating domain id array failed\n",
- iommu->name);
+ if (!iommu->domain_ids)
return -ENOMEM;
- }
size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
iommu->domains = kzalloc(size, GFP_KERNEL);
@@ -1980,10 +1971,6 @@ static void domain_exit(struct dmar_domain *domain)
/* Remove associated devices and clear attached or cached domains */
domain_remove_dev_info(domain);
- /* destroy iovas */
- if (domain->domain.type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&domain->domain);
-
if (domain->pgd) {
struct page *freelist;
@@ -2334,9 +2321,9 @@ static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
+ struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
- struct dma_pte *pte = NULL;
phys_addr_t pteval;
u64 attr;
@@ -2348,13 +2335,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
attr |= DMA_FL_PTE_PRESENT;
if (domain_use_first_level(domain)) {
- attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-
- if (domain->domain.type == IOMMU_DOMAIN_DMA) {
- attr |= DMA_FL_PTE_ACCESS;
- if (prot & DMA_PTE_WRITE)
- attr |= DMA_FL_PTE_DIRTY;
- }
+ attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
}
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
@@ -2369,6 +2352,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
+ first_pte = pte;
+
/* It is large page*/
if (largepage_lvl > 1) {
unsigned long end_pfn;
@@ -2416,14 +2401,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
- *
- * We leave clflush for the leaf pte changes to iotlb_sync_map()
- * callback.
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages))
+ (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
pte = NULL;
+ }
}
return 0;
@@ -3227,7 +3212,6 @@ static int __init init_dmars(void)
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
- pr_err("Allocating global iommu array failed\n");
ret = -ENOMEM;
goto error;
}
@@ -4393,9 +4377,9 @@ int __init intel_iommu_init(void)
* is likely to be much lower than the overhead of synchronizing
* the virtual and physical IOMMU page-tables.
*/
- if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
- pr_warn("IOMMU batching is disabled due to virtualization");
- intel_iommu_strict = 1;
+ if (cap_caching_mode(iommu->cap)) {
+ pr_info_once("IOMMU batching disallowed due to virtualization\n");
+ iommu_set_dma_strict();
}
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
@@ -4404,7 +4388,6 @@ int __init intel_iommu_init(void)
}
up_read(&dmar_global_lock);
- iommu_set_dma_strict(intel_iommu_strict);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
@@ -4532,6 +4515,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
switch (type) {
case IOMMU_DOMAIN_DMA:
+ case IOMMU_DOMAIN_DMA_FQ:
case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(0);
if (!dmar_domain) {
@@ -4544,10 +4528,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return NULL;
}
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&dmar_domain->domain))
- return NULL;
-
domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end =
@@ -5067,6 +5047,28 @@ static int intel_iommu_map(struct iommu_domain *domain,
hpa >> VTD_PAGE_SHIFT, size, prot);
}
+static int intel_iommu_map_pages(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ unsigned long pgshift = __ffs(pgsize);
+ size_t size = pgcount << pgshift;
+ int ret;
+
+ if (pgsize != SZ_4K && pgsize != SZ_2M && pgsize != SZ_1G)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(iova | paddr, pgsize))
+ return -EINVAL;
+
+ ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
+ if (!ret && mapped)
+ *mapped = size;
+
+ return ret;
+}
+
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *gather)
@@ -5096,6 +5098,17 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
return size;
}
+static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ unsigned long pgshift = __ffs(pgsize);
+ size_t size = pgcount << pgshift;
+
+ return intel_iommu_unmap(domain, iova, size, gather);
+}
+
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
@@ -5172,12 +5185,8 @@ static void intel_iommu_release_device(struct device *dev)
static void intel_iommu_probe_finalize(struct device *dev)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
- if (domain && domain->type == IOMMU_DOMAIN_DMA)
- iommu_setup_dma_ops(dev, 0, U64_MAX);
- else
- set_dma_ops(dev, NULL);
+ set_dma_ops(dev, NULL);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
}
static void intel_iommu_get_resv_regions(struct device *device,
@@ -5532,39 +5541,6 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
- unsigned long clf_pages)
-{
- struct dma_pte *first_pte = NULL, *pte = NULL;
- unsigned long lvl_pages = 0;
- int level = 0;
-
- while (clf_pages > 0) {
- if (!pte) {
- level = 0;
- pte = pfn_to_dma_pte(domain, clf_pfn, &level);
- if (WARN_ON(!pte))
- return;
- first_pte = pte;
- lvl_pages = lvl_to_nr_pages(level);
- }
-
- if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
- return;
-
- clf_pages -= lvl_pages;
- clf_pfn += lvl_pages;
- pte++;
-
- if (!clf_pages || first_pte_in_page(pte) ||
- (level > 1 && clf_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- pte = NULL;
- }
- }
-}
-
static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -5574,9 +5550,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
struct intel_iommu *iommu;
int iommu_id;
- if (!dmar_domain->iommu_coherency)
- clflush_sync_map(dmar_domain, pfn, pages);
-
for_each_domain_iommu(iommu_id, dmar_domain) {
iommu = g_iommus[iommu_id];
__mapping_notify_one(iommu, dmar_domain, pfn, pages);
@@ -5593,9 +5566,9 @@ const struct iommu_ops intel_iommu_ops = {
.aux_attach_dev = intel_iommu_aux_attach_device,
.aux_detach_dev = intel_iommu_aux_detach_device,
.aux_get_pasid = intel_iommu_aux_get_pasid,
- .map = intel_iommu_map,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
- .unmap = intel_iommu_unmap,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
@@ -5611,7 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+ .pgsize_bitmap = SZ_4K,
#ifdef CONFIG_INTEL_IOMMU_SVM
.cache_invalidate = intel_iommu_sva_invalidate,
.sva_bind_gpasid = intel_svm_bind_gpasid,
@@ -5714,8 +5687,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
} else if (dmar_map_gfx) {
/* we have to ensure the gfx device is idle before we flush */
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
- intel_iommu_strict = 1;
- }
+ iommu_set_dma_strict();
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
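For reference, the new ->map_pages()/->unmap_pages() callbacks take one page size plus a count of contiguous pages rather than an arbitrary byte length. A driver with an existing byte-based mapping routine can satisfy that contract with a thin wrapper in the style of intel_iommu_map_pages() above. An illustrative sketch, in which example_map() stands in for the driver's existing byte-based implementation:

static int example_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t pgsize, size_t pgcount,
			     int prot, gfp_t gfp, size_t *mapped)
{
	size_t size = pgcount << __ffs(pgsize);
	int ret;

	/* pgsize is a single page size; reject misaligned runs */
	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	ret = example_map(domain, iova, paddr, size, prot, gfp);
	if (!ret && mapped)
		*mapped = size;	/* report progress only on success */

	return ret;
}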
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 9ec374e17469..07c390aed1fe 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -517,7 +517,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
if (WARN_ON(!pte))
return;
- if (!(pte->val[0] & PASID_PTE_PRESENT))
+ if (!pasid_pte_is_present(pte))
return;
did = pasid_get_domain_id(pte);
@@ -540,6 +540,10 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
+/*
+ * This function flushes the caches for a newly set-up PASID table entry.
+ * Callers must not use it on PASID table entries that are already in use.
+ */
static void pasid_flush_caches(struct intel_iommu *iommu,
struct pasid_entry *pte,
u32 pasid, u16 did)
@@ -591,6 +595,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
if (WARN_ON(!pte))
return -EINVAL;
+ /* Caller must ensure PASID entry is not in use. */
+ if (pasid_pte_is_present(pte))
+ return -EBUSY;
+
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
@@ -690,6 +698,10 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -ENODEV;
}
+ /* Caller must ensure PASID entry is not in use. */
+ if (pasid_pte_is_present(pte))
+ return -EBUSY;
+
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
pasid_set_slptr(pte, pgd_val);
@@ -729,6 +741,10 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return -ENODEV;
}
+ /* Caller must ensure PASID entry is not in use. */
+ if (pasid_pte_is_present(pte))
+ return -EBUSY;
+
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index c11bc8b833b8..d5552e2c160d 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -28,12 +28,12 @@
#define VCMD_CMD_ALLOC 0x1
#define VCMD_CMD_FREE 0x2
#define VCMD_VRSP_IP 0x1
-#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
#define VCMD_VRSP_SC_SUCCESS 0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL 2
-#define VCMD_VRSP_SC_INVALID_PASID 2
-#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
-#define VCMD_CMD_OPERAND(e) ((e) << 8)
+#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
+#define VCMD_VRSP_SC_INVALID_PASID 16
+#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
+#define VCMD_CMD_OPERAND(e) ((e) << 16)
/*
* Domain ID reserved for pasid entries programmed for first-level
* only and pass-through transfer modes.
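The reworked virtual command macros relocate the response fields: the status code now sits in bits 7:1 and the result PASID in bits 35:16. A hypothetical decode of an allocation response under the new layout (example_decode_vcmd_alloc() is not part of the patch):

static int example_decode_vcmd_alloc(u64 res, u32 *pasid)
{
	/* status code in bits 7:1; zero means success */
	if (VCMD_VRSP_SC(res) != VCMD_VRSP_SC_SUCCESS)
		return VCMD_VRSP_SC(res) == VCMD_VRSP_SC_NO_PASID_AVAIL ?
		       -ENOSPC : -EIO;

	*pasid = VCMD_VRSP_RESULT_PASID(res);	/* bits 35:16 */
	return 0;
}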
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 73b7ec705552..0e8e03252d92 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* perf.c - performance monitor
*
* Copyright (C) 2021 Intel Corporation
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4b9b3f35ba0e..0c228787704f 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -31,8 +31,6 @@ static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
-#define PRQ_ORDER 0
-
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
{
@@ -516,9 +514,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
{
mutex_lock(&mm->context.lock);
- /* Synchronize with READ_ONCE in update_pasid(). */
- smp_store_release(&mm->pasid, pasid);
-
/* Update PASID MSR on all CPUs running the mm's tasks. */
on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
@@ -725,8 +720,6 @@ struct page_req_dsc {
u64 priv_data[2];
};
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-
static bool is_canonical_address(u64 addr)
{
int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
@@ -796,7 +789,19 @@ prq_retry:
goto prq_retry;
}
+ /*
+ * A work item in the IO page fault workqueue may try to take
+ * pasid_mutex now. Holding pasid_mutex while waiting in
+ * iopf_queue_flush_dev() for all work items to finish could
+ * therefore deadlock.
+ *
+ * It's unnecessary to hold pasid_mutex across iopf_queue_flush_dev(),
+ * so unlock it to let the work items run while we wait for them
+ * to finish.
+ */
+ lockdep_assert_held(&pasid_mutex);
+ mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
+ mutex_lock(&pasid_mutex);
/*
* Perform steps described in VT-d spec CH7.10 to drain page
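The unlock/relock around iopf_queue_flush_dev() above is an instance of a general rule: never wait for a workqueue to drain while holding a lock that its work items also take. Condensed into a standalone shape (hypothetical names; any state sampled before the unlock must be re-validated after relocking):

static void example_drain_under_lock(struct mutex *lock,
				     struct workqueue_struct *wq)
{
	lockdep_assert_held(lock);

	mutex_unlock(lock);	/* queued work items may need 'lock' */
	flush_workqueue(wq);	/* now safe: nothing they need is held */
	mutex_lock(lock);	/* caller re-checks state from here on */
}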
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d4004bcf333a..bfb6acb651e5 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -519,11 +519,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
-static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- int ret;
+ int ret = -EINVAL;
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
paddr >= (1ULL << data->iop.cfg.oas)))
@@ -533,7 +534,17 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
+ while (pgcount--) {
+ ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
+ gfp);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ if (mapped)
+ *mapped += pgsize;
+ }
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -543,6 +554,12 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
}
+static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL);
+}
+
static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
@@ -683,14 +700,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
ARM_V7S_BLOCK_SIZE(lvl + 1));
ptep = iopte_deref(pte[i], lvl, data);
__arm_v7s_free_table(ptep, lvl + 1, data);
- } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
- /*
- * Order the PTE update against queueing the IOVA, to
- * guarantee that a flush callback from a different CPU
- * has observed it before the TLBIALL can be issued.
- */
- smp_wmb();
- } else {
+ } else if (!iommu_iotlb_gather_queued(gather)) {
io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
}
iova += blk_size;
@@ -710,15 +720,32 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
}
-static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ size_t unmapped = 0, ret;
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
- return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
+ while (pgcount--) {
+ ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);
+ if (!ret)
+ break;
+
+ unmapped += pgsize;
+ iova += pgsize;
+ }
+
+ return unmapped;
+}
+
+static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ return arm_v7s_unmap_pages(ops, iova, size, 1, gather);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -757,8 +784,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_ARM_MTK_EXT |
- IO_PGTABLE_QUIRK_NON_STRICT))
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -780,7 +806,9 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_v7s_map,
+ .map_pages = arm_v7s_map_pages,
.unmap = arm_v7s_unmap,
+ .unmap_pages = arm_v7s_unmap_pages,
.iova_to_phys = arm_v7s_iova_to_phys,
};
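Note that arm_v7s_unmap_pages() reports the number of bytes actually unmapped and returns short when it hits an unmapped hole. A hypothetical caller honours that contract like so (example_unmap_run() is illustrative only):

static size_t example_unmap_run(struct io_pgtable_ops *ops, unsigned long iova,
				size_t pgcount,
				struct iommu_iotlb_gather *gather)
{
	size_t done = ops->unmap_pages(ops, iova, SZ_4K, pgcount, gather);

	if (done != pgcount * SZ_4K)
		pr_debug("stopped at a hole after %zu bytes\n", done);

	return done;
}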
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 87def58e79b5..dd9e47189d0d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -46,6 +46,9 @@
#define ARM_LPAE_PGD_SIZE(d) \
(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
+#define ARM_LPAE_PTES_PER_TABLE(d) \
+ (ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
+
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
@@ -127,6 +130,9 @@
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7)
+#define APPLE_DART_PTE_PROT_NO_READ (1<<8)
+
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -232,70 +238,77 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
free_pages((unsigned long)pages, get_order(size));
}
-static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
+static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
- sizeof(*ptep), DMA_TO_DEVICE);
+ sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}
-static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
- struct io_pgtable_cfg *cfg)
+static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
- *ptep = pte;
+ *ptep = 0;
if (!cfg->coherent_walk)
- __arm_lpae_sync_pte(ptep, cfg);
+ __arm_lpae_sync_pte(ptep, 1, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep);
+ unsigned long iova, size_t size, size_t pgcount,
+ int lvl, arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
phys_addr_t paddr, arm_lpae_iopte prot,
- int lvl, arm_lpae_iopte *ptep)
+ int lvl, int num_entries, arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte = prot;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ int i;
if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
pte |= ARM_LPAE_PTE_TYPE_PAGE;
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- pte |= paddr_to_iopte(paddr, data);
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
- __arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
+ if (!cfg->coherent_walk)
+ __arm_lpae_sync_pte(ptep, num_entries, cfg);
}
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
- arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte prot, int lvl, int num_entries,
arm_lpae_iopte *ptep)
{
- arm_lpae_iopte pte = *ptep;
-
- if (iopte_leaf(pte, lvl, data->iop.fmt)) {
- /* We require an unmap first */
- WARN_ON(!selftest_running);
- return -EEXIST;
- } else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) {
- /*
- * We need to unmap and free the old table before
- * overwriting it with a block entry.
- */
- arm_lpae_iopte *tblp;
- size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-
- tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
- if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
- WARN_ON(1);
- return -EINVAL;
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ } else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_lpae_iopte *tblp;
+ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+ if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
+ lvl, tblp) != sz) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
}
- }
- __arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
+ __arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
return 0;
}
@@ -323,7 +336,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
return old;
/* Even if it's not ours, there's no point waiting; just kick it */
- __arm_lpae_sync_pte(ptep, cfg);
+ __arm_lpae_sync_pte(ptep, 1, cfg);
if (old == curr)
WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
@@ -331,20 +344,30 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
- phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
- int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
+ phys_addr_t paddr, size_t size, size_t pgcount,
+ arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
+ gfp_t gfp, size_t *mapped)
{
arm_lpae_iopte *cptep, pte;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
size_t tblsz = ARM_LPAE_GRANULE(data);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int ret = 0, num_entries, max_entries, map_idx_start;
/* Find our entry at the current level */
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+ ptep += map_idx_start;
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size)
- return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+ if (size == block_size) {
+ max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+ ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
+ if (!ret && mapped)
+ *mapped += num_entries * size;
+
+ return ret;
+ }
/* We can't allocate tables at the final level */
if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
@@ -361,7 +384,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
- __arm_lpae_sync_pte(ptep, cfg);
+ __arm_lpae_sync_pte(ptep, 1, cfg);
}
if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
@@ -373,7 +396,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
+ return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
+ cptep, gfp, mapped);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -381,6 +405,15 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte;
+ if (data->iop.fmt == APPLE_DART) {
+ pte = 0;
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART_PTE_PROT_NO_READ;
+ return pte;
+ }
+
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG;
@@ -440,8 +473,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
return pte;
}
-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
+static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
@@ -450,7 +484,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte prot;
long iaext = (s64)iova >> cfg->ias;
- if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
return -EINVAL;
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
@@ -463,7 +497,8 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return 0;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
+ ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
+ ptep, gfp, mapped);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -473,6 +508,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
}
+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
+{
+ return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
+ NULL);
+}
+
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *ptep)
{
@@ -516,14 +558,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size,
arm_lpae_iopte blk_pte, int lvl,
- arm_lpae_iopte *ptep)
+ arm_lpae_iopte *ptep, size_t pgcount)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte pte, *tablep;
phys_addr_t blk_paddr;
size_t tablesz = ARM_LPAE_GRANULE(data);
size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
- int i, unmap_idx = -1;
+ int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
+ int i, unmap_idx_start = -1, num_entries = 0, max_entries;
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
@@ -532,18 +575,21 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
if (!tablep)
return 0; /* Bytes unmapped */
- if (size == split_sz)
- unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
+ if (size == split_sz) {
+ unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+ max_entries = ptes_per_table - unmap_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+ }
blk_paddr = iopte_to_paddr(blk_pte, data);
pte = iopte_prot(blk_pte);
- for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
+ for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
/* Unmap! */
- if (i == unmap_idx)
+ if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
continue;
- __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
+ __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
}
pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
@@ -558,76 +604,85 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
- } else if (unmap_idx >= 0) {
- io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
- return size;
+ } else if (unmap_idx_start >= 0) {
+ for (i = 0; i < num_entries; i++)
+ io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
+
+ return num_entries * size;
}
- return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
+ return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep)
+ unsigned long iova, size_t size, size_t pgcount,
+ int lvl, arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
struct io_pgtable *iop = &data->iop;
+ int i = 0, num_entries, max_entries, unmap_idx_start;
/* Something went horribly wrong and we ran out of page table */
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+ ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
if (WARN_ON(!pte))
return 0;
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
- __arm_lpae_set_pte(ptep, 0, &iop->cfg);
-
- if (!iopte_leaf(pte, lvl, iop->fmt)) {
- /* Also flush any partial walks */
- io_pgtable_tlb_flush_walk(iop, iova, size,
- ARM_LPAE_GRANULE(data));
- ptep = iopte_deref(pte, data);
- __arm_lpae_free_pgtable(data, lvl + 1, ptep);
- } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
- /*
- * Order the PTE update against queueing the IOVA, to
- * guarantee that a flush callback from a different CPU
- * has observed it before the TLBIALL can be issued.
- */
- smp_wmb();
- } else {
- io_pgtable_tlb_add_page(iop, gather, iova, size);
+ max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+
+ while (i < num_entries) {
+ pte = READ_ONCE(*ptep);
+ if (WARN_ON(!pte))
+ break;
+
+ __arm_lpae_clear_pte(ptep, &iop->cfg);
+
+ if (!iopte_leaf(pte, lvl, iop->fmt)) {
+ /* Also flush any partial walks */
+ io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
+ ARM_LPAE_GRANULE(data));
+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+ } else if (!iommu_iotlb_gather_queued(gather)) {
+ io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
+ }
+
+ ptep++;
+ i++;
}
- return size;
+ return i * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
/*
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
*/
return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
- lvl + 1, ptep);
+ lvl + 1, ptep, pgcount);
}
/* Keep on walkin' */
ptep = iopte_deref(pte, data);
- return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}
-static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
long iaext = (s64)iova >> cfg->ias;
- if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
return 0;
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
@@ -635,7 +690,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iaext))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
+ return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
+ data->start_level, ptep);
+}
+
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -750,7 +812,9 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
+ .map_pages = arm_lpae_map_pages,
.unmap = arm_lpae_unmap,
+ .unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
};
@@ -766,7 +830,6 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
bool tg1;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
- IO_PGTABLE_QUIRK_NON_STRICT |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
return NULL;
@@ -870,7 +933,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
+ if (cfg->quirks)
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1043,6 +1106,52 @@ out_free_data:
return NULL;
}
+static struct io_pgtable *
+apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct arm_lpae_io_pgtable *data;
+ int i;
+
+ if (cfg->oas > 36)
+ return NULL;
+
+ data = arm_lpae_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
+
+ /*
+ * The table format itself always uses two levels, but the total VA
+ * space is mapped by four separate tables, making the MMIO registers
+ * an effective "level 1". For simplicity, though, we treat this
+ * equivalently to LPAE stage 2 concatenation at level 2, with the
+ * additional TTBRs each just pointing at consecutive pages.
+ */
+ if (data->start_level < 1)
+ goto out_free_data;
+ if (data->start_level == 1 && data->pgd_bits > 2)
+ goto out_free_data;
+ if (data->start_level > 1)
+ data->pgd_bits = 0;
+ data->start_level = 2;
+ cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
+ data->pgd_bits += data->bits_per_level;
+
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+ cfg);
+ if (!data->pgd)
+ goto out_free_data;
+
+ for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
+ cfg->apple_dart_cfg.ttbr[i] =
+ virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));
+
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
@@ -1068,6 +1177,11 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.free = arm_lpae_free_pgtable,
};
+struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
+ .alloc = apple_dart_alloc_pgtable,
+ .free = arm_lpae_free_pgtable,
+};
+
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie __initdata;
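To put concrete numbers on ARM_LPAE_PTES_PER_TABLE() and the batched leaf install: with a 4KB granule and 8-byte PTEs (illustrative values), a table holds 512 entries, so a single arm_lpae_init_pte() call can fill everything from the starting index to the end of the table and pay for one cache sync:

	/* illustrative arithmetic, assuming 4KB granule and 8-byte PTEs */
	int ptes_per_table = SZ_4K / sizeof(u64);		/* 512 */
	int max_entries = ptes_per_table - map_idx_start;	/* room left */
	int num_entries = min_t(int, pgcount, max_entries);	/* one sync */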
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 6e9917ce980f..f4bfcef98297 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -20,6 +20,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
+ [APPLE_DART] = &io_pgtable_apple_dart_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 63f0af10c403..3303d707bab4 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -7,7 +7,9 @@
#define pr_fmt(fmt) "iommu: " fmt
#include <linux/device.h>
+#include <linux/dma-iommu.h>
#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -29,7 +31,7 @@ static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static unsigned int iommu_def_domain_type __read_mostly;
-static bool iommu_dma_strict __read_mostly = true;
+static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;
struct iommu_group {
@@ -113,6 +115,7 @@ static const char *iommu_domain_type_str(unsigned int t)
case IOMMU_DOMAIN_UNMANAGED:
return "Unmanaged";
case IOMMU_DOMAIN_DMA:
+ case IOMMU_DOMAIN_DMA_FQ:
return "Translated";
default:
return "Unknown";
@@ -133,11 +136,20 @@ static int __init iommu_subsys_init(void)
}
}
+ if (!iommu_default_passthrough() && !iommu_dma_strict)
+ iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;
+
pr_info("Default domain type: %s %s\n",
iommu_domain_type_str(iommu_def_domain_type),
(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
"(set via kernel command line)" : "");
+ if (!iommu_default_passthrough())
+ pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
+ iommu_dma_strict ? "strict" : "lazy",
+ (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
+ "(set via kernel command line)" : "");
+
return 0;
}
subsys_initcall(iommu_subsys_init);
@@ -273,7 +285,9 @@ int iommu_probe_device(struct device *dev)
* support default domains, so the return value is not yet
* checked.
*/
+ mutex_lock(&group->mutex);
iommu_alloc_default_domain(group, dev);
+ mutex_unlock(&group->mutex);
if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);
@@ -344,20 +358,12 @@ static int __init iommu_dma_setup(char *str)
}
early_param("iommu.strict", iommu_dma_setup);
-void iommu_set_dma_strict(bool strict)
-{
- if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
- iommu_dma_strict = strict;
-}
-
-bool iommu_get_dma_strict(struct iommu_domain *domain)
+void iommu_set_dma_strict(void)
{
- /* only allow lazy flushing for DMA domains */
- if (domain->type == IOMMU_DOMAIN_DMA)
- return iommu_dma_strict;
- return true;
+ iommu_dma_strict = true;
+ if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
+ iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}
-EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
@@ -546,6 +552,9 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
case IOMMU_DOMAIN_DMA:
type = "DMA\n";
break;
+ case IOMMU_DOMAIN_DMA_FQ:
+ type = "DMA-FQ\n";
+ break;
}
}
mutex_unlock(&group->mutex);
@@ -759,7 +768,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
unsigned long pg_size;
int ret = 0;
- if (!domain || domain->type != IOMMU_DOMAIN_DMA)
+ if (!domain || !iommu_is_dma_domain(domain))
return 0;
BUG_ON(!domain->pgsize_bitmap);
@@ -1944,6 +1953,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
/* Assume all sizes by default; the driver may override this later */
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ /* Temporarily avoid -EEXIST while drivers still get their own cookies */
+ if (iommu_is_dma_domain(domain) && !domain->iova_cookie &&
+     iommu_get_dma_cookie(domain)) {
+ iommu_domain_free(domain);
+ domain = NULL;
+ }
return domain;
}
@@ -1955,6 +1969,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
+ iommu_put_dma_cookie(domain);
domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -2370,45 +2385,94 @@ EXPORT_SYMBOL_GPL(iommu_detach_group);
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
- if (unlikely(domain->ops->iova_to_phys == NULL))
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
+ if (domain->type == IOMMU_DOMAIN_BLOCKED)
return 0;
return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
-static size_t iommu_pgsize(struct iommu_domain *domain,
- unsigned long addr_merge, size_t size)
+static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, size_t *count)
{
- unsigned int pgsize_idx;
- size_t pgsize;
+ unsigned int pgsize_idx, pgsize_idx_next;
+ unsigned long pgsizes;
+ size_t offset, pgsize, pgsize_next;
+ unsigned long addr_merge = paddr | iova;
- /* Max page size that still fits into 'size' */
- pgsize_idx = __fls(size);
+ /* Page sizes supported by the hardware and small enough for @size */
+ pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
- /* need to consider alignment requirements ? */
- if (likely(addr_merge)) {
- /* Max page size allowed by address */
- unsigned int align_pgsize_idx = __ffs(addr_merge);
- pgsize_idx = min(pgsize_idx, align_pgsize_idx);
- }
+ /* Constrain the page sizes further based on the maximum alignment */
+ if (likely(addr_merge))
+ pgsizes &= GENMASK(__ffs(addr_merge), 0);
- /* build a mask of acceptable page sizes */
- pgsize = (1UL << (pgsize_idx + 1)) - 1;
+ /* Make sure we have at least one suitable page size */
+ BUG_ON(!pgsizes);
- /* throw away page sizes not supported by the hardware */
- pgsize &= domain->pgsize_bitmap;
+ /* Pick the biggest page size remaining */
+ pgsize_idx = __fls(pgsizes);
+ pgsize = BIT(pgsize_idx);
+ if (!count)
+ return pgsize;
- /* make sure we're still sane */
- BUG_ON(!pgsize);
+ /* Find the next biggest supported page size, if it exists */
+ pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+ if (!pgsizes)
+ goto out_set_count;
- /* pick the biggest page */
- pgsize_idx = __fls(pgsize);
- pgsize = 1UL << pgsize_idx;
+ pgsize_idx_next = __ffs(pgsizes);
+ pgsize_next = BIT(pgsize_idx_next);
+ /*
+ * There's no point trying a bigger page size unless the virtual
+ * and physical addresses are similarly offset within the larger page.
+ */
+ if ((iova ^ paddr) & (pgsize_next - 1))
+ goto out_set_count;
+
+ /* Calculate the offset to the next page size alignment boundary */
+ offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+ /*
+ * If size is big enough to accommodate the larger page, reduce
+ * the number of smaller pages.
+ */
+ if (offset + pgsize_next <= size)
+ size = offset;
+
+out_set_count:
+ *count = size >> pgsize_idx;
return pgsize;
}
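To see how the reworked helper batches pages, consider a hypothetical call with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G:

/*
 * iommu_pgsize(domain, 0x201000, 0x401000, SZ_4M, &count)
 *   - address alignment limits the immediate pick to pgsize = SZ_4K
 *   - the next supported size is SZ_2M, and (iova ^ paddr) is 2M-aligned,
 *     so larger mappings become possible once that boundary is reached
 *   - the distance to the next 2M boundary is 0x1ff000 bytes, which fits
 *     within the 4M request, so size is clamped to that distance
 *   => returns SZ_4K with count = 0x1ff000 >> 12 = 511; the follow-up
 *      call starts 2M-aligned and can use SZ_2M mappings.
 */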
+static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot,
+ gfp_t gfp, size_t *mapped)
+{
+ const struct iommu_ops *ops = domain->ops;
+ size_t pgsize, count;
+ int ret;
+
+ pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
+
+ pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+ iova, &paddr, pgsize, count);
+
+ if (ops->map_pages) {
+ ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+ gfp, mapped);
+ } else {
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+ *mapped = ret ? 0 : pgsize;
+ }
+
+ return ret;
+}
+
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
@@ -2419,7 +2483,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t orig_paddr = paddr;
int ret = 0;
- if (unlikely(ops->map == NULL ||
+ if (unlikely(!(ops->map || ops->map_pages) ||
domain->pgsize_bitmap == 0UL))
return -ENODEV;
@@ -2443,18 +2507,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) {
- size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+ size_t mapped = 0;
- pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
- iova, &paddr, pgsize);
- ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+ ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
+ &mapped);
+ /*
+ * Some pages may have been mapped before the error occurred, so
+ * account for them here so that they can be unmapped later.
+ */
+ size -= mapped;
if (ret)
break;
- iova += pgsize;
- paddr += pgsize;
- size -= pgsize;
+ iova += mapped;
+ paddr += mapped;
}
/* unroll mapping in case something went wrong */
@@ -2494,6 +2561,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
+static size_t __iommu_unmap_pages(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ const struct iommu_ops *ops = domain->ops;
+ size_t pgsize, count;
+
+ pgsize = iommu_pgsize(domain, iova, iova, size, &count);
+ return ops->unmap_pages ?
+ ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
+ ops->unmap(domain, iova, pgsize, iotlb_gather);
+}
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -2503,7 +2583,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long orig_iova = iova;
unsigned int min_pagesz;
- if (unlikely(ops->unmap == NULL ||
+ if (unlikely(!(ops->unmap || ops->unmap_pages) ||
domain->pgsize_bitmap == 0UL))
return 0;
@@ -2531,9 +2611,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
-
- unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
+ unmapped_page = __iommu_unmap_pages(domain, iova,
+ size - unmapped,
+ iotlb_gather);
if (!unmapped_page)
break;
@@ -2570,9 +2650,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot,
- gfp_t gfp)
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
@@ -2613,19 +2693,18 @@ out_err:
/* undo mappings already done */
iommu_unmap(domain, iova, mapped);
- return 0;
-
+ return ret;
}
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
{
might_sleep();
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
-size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
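With the return type widened to ssize_t, failure is now reported as a negative errno rather than 0, so callers adapt along these lines (example_map_sglist() is a hypothetical caller):

static int example_map_sglist(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot)
{
	ssize_t mapped = iommu_map_sg(domain, iova, sg, nents, prot);

	if (mapped < 0)		/* was: if (!mapped) return -ENOMEM; */
		return mapped;

	return 0;
}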
@@ -3129,6 +3208,14 @@ static int iommu_change_dev_def_domain(struct iommu_group *group,
goto out;
}
+ /* We can bring up a flush queue without tearing down the domain */
+ if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
+ ret = iommu_dma_init_fq(prev_dom);
+ if (!ret)
+ prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
+ goto out;
+ }
+
/* Sets group->default_domain to the newly allocated domain */
ret = iommu_group_alloc_default_domain(dev->bus, group, type);
if (ret)
@@ -3169,9 +3256,9 @@ out:
}
/*
- * Changing the default domain through sysfs requires the users to ubind the
- * drivers from the devices in the iommu group. Return failure if this doesn't
- * meet.
+ * Changing the default domain through sysfs requires the users to unbind the
+ * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
+ * transition. Return failure if this isn't met.
*
* We need to consider the race between this and the device release path.
* device_lock(dev) is used here to guarantee that the device release path
@@ -3194,6 +3281,8 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
req_type = IOMMU_DOMAIN_IDENTITY;
else if (sysfs_streq(buf, "DMA"))
req_type = IOMMU_DOMAIN_DMA;
+ else if (sysfs_streq(buf, "DMA-FQ"))
+ req_type = IOMMU_DOMAIN_DMA_FQ;
else if (sysfs_streq(buf, "auto"))
req_type = 0;
else
@@ -3245,7 +3334,8 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
/* Check if the device in the group still has a driver bound to it */
device_lock(dev);
- if (device_is_bound(dev)) {
+ if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
+ group->default_domain->type == IOMMU_DOMAIN_DMA)) {
pr_err_ratelimited("Device is still bound to driver\n");
ret = -EBUSY;
goto out;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b6cf5f16123b..9e8bc802ac05 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -121,8 +121,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
spin_lock_init(&fq->lock);
}
- smp_wmb();
-
iovad->fq = queue;
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
@@ -521,6 +519,7 @@ retry:
return new_iova->pfn_lo;
}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
* free_iova_fast - free iova pfn range into rcache
@@ -538,6 +537,7 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
free_iova(iovad, pfn);
}
+EXPORT_SYMBOL_GPL(free_iova_fast);
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
@@ -633,10 +633,20 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
- struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
+ struct iova_fq *fq;
unsigned long flags;
unsigned idx;
+ /*
+ * Order against the IOMMU driver's pagetable update from unmapping
+ * @pte, to guarantee that iova_domain_flush() observes that update
+ * if it is called from a different CPU before we release the lock
+ * below. Full barrier so it also pairs with iommu_dma_init_fq() to
+ * avoid seeing partially written fq state here.
+ */
+ smp_mb();
+
+ fq = raw_cpu_ptr(iovad->fq);
spin_lock_irqsave(&fq->lock, flags);
/*
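The smp_mb() inserted above is easiest to read as the consumer half of a publish/consume pairing. Conceptually (a sketch, not code from the patch):

/*
 * writer (flush-queue init)          reader (queue_iova)
 * -------------------------          -------------------
 * initialise fq rings and locks      smp_mb();
 * publish iovad->fq            --->  fq = raw_cpu_ptr(iovad->fq);
 *
 * The reader's full barrier guarantees it observes fully written fq
 * state, and also orders the driver's earlier PTE clear before the
 * IOVA is queued, so a concurrent flush cannot miss the update.
 */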
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 51ea6f00db2f..d38ff29a76e8 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -8,7 +8,6 @@
#include <linux/bitmap.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -564,10 +563,13 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
* IOMMU Operations
*/
-static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
struct ipmmu_vmsa_domain *domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ return NULL;
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -577,27 +579,6 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
return &domain->io_domain;
}
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
-{
- struct iommu_domain *io_domain = NULL;
-
- switch (type) {
- case IOMMU_DOMAIN_UNMANAGED:
- io_domain = __ipmmu_domain_alloc(type);
- break;
-
- case IOMMU_DOMAIN_DMA:
- io_domain = __ipmmu_domain_alloc(type);
- if (io_domain && iommu_get_dma_cookie(io_domain)) {
- kfree(io_domain);
- io_domain = NULL;
- }
- break;
- }
-
- return io_domain;
-}
-
static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -606,7 +587,6 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
* Free the domain resources. We assume that all devices have already
* been detached.
*/
- iommu_put_dma_cookie(io_domain);
ipmmu_domain_destroy_context(domain);
free_io_pgtable_ops(domain->iop);
kfree(domain);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6f7c69688ce2..d837adfd1da5 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -9,7 +9,6 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -441,17 +440,11 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
if (!dom)
return NULL;
- if (iommu_get_dma_cookie(&dom->domain)) {
- kfree(dom);
- return NULL;
- }
-
return &dom->domain;
}
static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
- iommu_put_dma_cookie(domain);
kfree(to_mtk_domain(domain));
}
@@ -520,12 +513,8 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned long end = iova + size - 1;
- if (gather->start > iova)
- gather->start = iova;
- if (gather->end < end)
- gather->end = end;
+ iommu_iotlb_gather_add_range(gather, iova, size);
return dom->iop->unmap(dom->iop, iova, size, gather);
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 778e66f5f1aa..be22fcf988ce 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -13,7 +13,6 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9febfb7f3025..5cb260820eda 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -10,7 +10,6 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -1074,10 +1073,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!rk_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&rk_domain->domain))
- goto err_free_domain;
-
/*
* rk32xx iommus use a 2 level pagetable.
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
@@ -1085,7 +1080,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
*/
rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
if (!rk_domain->dt)
- goto err_put_cookie;
+ goto err_free_domain;
rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
@@ -1106,9 +1101,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
err_free_dt:
free_page((unsigned long)rk_domain->dt);
-err_put_cookie:
- if (type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
kfree(rk_domain);
@@ -1137,8 +1129,6 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)rk_domain->dt);
- if (domain->type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&rk_domain->domain);
kfree(rk_domain);
}
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 73dfd9946312..27ac818b0354 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu.h>
@@ -144,11 +143,6 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
if (!dom)
return NULL;
- if (iommu_get_dma_cookie(&dom->domain)) {
- kfree(dom);
- return NULL;
- }
-
spin_lock_init(&dom->pgtlock);
dom->domain.geometry.aperture_start = 0;
@@ -161,7 +155,6 @@ static void sprd_iommu_domain_free(struct iommu_domain *domain)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
- iommu_put_dma_cookie(domain);
kfree(dom);
}
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 181bb1c3437c..92997021e188 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -610,14 +609,10 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
if (!sun50i_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&sun50i_domain->domain))
- goto err_free_domain;
-
sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(DT_SIZE));
if (!sun50i_domain->dt)
- goto err_put_cookie;
+ goto err_free_domain;
refcount_set(&sun50i_domain->refcnt, 1);
@@ -627,10 +622,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
return &sun50i_domain->domain;
-err_put_cookie:
- if (type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&sun50i_domain->domain);
-
err_free_domain:
kfree(sun50i_domain);
@@ -644,8 +635,6 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
sun50i_domain->dt = NULL;
- iommu_put_dma_cookie(domain);
-
kfree(sun50i_domain);
}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 6abdcab7273b..80930ce04a16 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -598,12 +598,6 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
spin_lock_init(&vdomain->mappings_lock);
vdomain->mappings = RB_ROOT_CACHED;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&vdomain->domain)) {
- kfree(vdomain);
- return NULL;
- }
-
return &vdomain->domain;
}
@@ -643,8 +637,6 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- iommu_put_dma_cookie(domain);
-
/* Free all remaining mappings (size 2^64) */
viommu_del_mappings(vdomain, 0, 0);
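All of the drivers above shrink in the same way: with __iommu_domain_alloc() and iommu_domain_free() now managing the IOVA cookie centrally, a typical domain_alloc reduces to a shape like this (generic sketch for a hypothetical driver):

static struct iommu_domain *example_domain_alloc(unsigned type)
{
	struct example_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* no iommu_get_dma_cookie() here any more; the core owns it */
	return &dom->domain;
}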
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index f26bf3c66d7e..d7ae42edc4a8 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -379,7 +379,7 @@ static char *pnames[] =
/*2f */ "Useruserdata"
};
-#include <stdarg.h>
+#include <linux/stdarg.h>
/*-------------------------------------------------------*/
static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt, ...)
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index d4759db002c6..dc634c2932fd 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -2,7 +2,6 @@
/*
* Driver for the ADB controller in the Mac I/O (Hydra) chip.
*/
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 3581abfb0c6a..cd267392289c 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -9,7 +9,7 @@
*
* Copyright (C) 1996 Paul Mackerras.
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 060e03f2264b..db9270da5b8e 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -23,8 +23,6 @@
* Apple's "ADB Analyzer" bus sniffer is invaluable:
* ftp://ftp.apple.com/developer/Tool_Chest/Devices_-_Hardware/Apple_Desktop_Bus/
*/
-
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4bdd4c45e7a7..4b98bc26a94b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -18,7 +18,7 @@
* a sleep or a freq. switch
*
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index b4b780ea2ac8..c9fc06c7e685 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -264,7 +264,7 @@ config SPRD_MBOX
you want to build the Spreadtrum mailbox controller driver.
config QCOM_IPCC
- bool "Qualcomm Technologies, Inc. IPCC driver"
+ tristate "Qualcomm Technologies, Inc. IPCC driver"
depends on ARCH_QCOM || COMPILE_TEST
help
Qualcomm Technologies, Inc. Inter-Processor Communication Controller
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index 2baf69a0b81c..ab3a6ab762d3 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -36,12 +36,7 @@
#define MBOX_BASE(mdev, inst) ((mdev)->base + ((inst) * 4))
/**
- * STi Mailbox device data
- *
- * An IP Mailbox is currently composed of 4 instances
- * Each instance is currently composed of 32 channels
- * This means that we have 128 channels per Mailbox
- * A channel an be used for TX or RX
+ * struct sti_mbox_device - STi Mailbox device data
*
* @dev: Device to which it is attached
* @mbox: Representation of a communication channel controller
@@ -49,6 +44,11 @@
* @name: Name of the mailbox
* @enabled: Local copy of enabled channels
* @lock: Mutex protecting enabled status
+ *
+ * An IP Mailbox is currently composed of 4 instances.
+ * Each instance is currently composed of 32 channels.
+ * This means that we have 128 channels per Mailbox.
+ * A channel can be used for TX or RX.
*/
struct sti_mbox_device {
struct device *dev;
@@ -60,7 +60,7 @@ struct sti_mbox_device {
};
/**
- * STi Mailbox platform specific configuration
+ * struct sti_mbox_pdata - STi Mailbox platform specific configuration
*
* @num_inst: Maximum number of instances in one HW Mailbox
* @num_chan: Maximum number of channel per instance
@@ -71,7 +71,7 @@ struct sti_mbox_pdata {
};
/**
- * STi Mailbox allocated channel information
+ * struct sti_channel - STi Mailbox allocated channel information
*
* @mdev: Pointer to parent Mailbox device
* @instance: Instance number channel resides in
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 67a42b514429..64175a893312 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -19,6 +19,7 @@
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -36,6 +37,8 @@
#define CMDQ_THR_WAIT_TOKEN 0x30
#define CMDQ_THR_PRIORITY 0x40
+#define GCE_GCTL_VALUE 0x48
+
#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
#define CMDQ_THR_ENABLED 0x1
#define CMDQ_THR_DISABLED 0x0
@@ -73,14 +76,18 @@ struct cmdq {
u32 thread_nr;
u32 irq_mask;
struct cmdq_thread *thread;
- struct clk *clock;
+ struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
bool suspended;
u8 shift_pa;
+ bool control_by_sw;
+ u32 gce_num;
};
struct gce_plat {
u32 thread_nr;
u8 shift;
+ bool control_by_sw;
+ u32 gce_num;
};
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
@@ -120,11 +127,13 @@ static void cmdq_init(struct cmdq *cmdq)
{
int i;
- WARN_ON(clk_enable(cmdq->clock) < 0);
+ WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
+ if (cmdq->control_by_sw)
+ writel(0x7, cmdq->base + GCE_GCTL_VALUE);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
- clk_disable(cmdq->clock);
+ clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
@@ -168,7 +177,8 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
- (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
+ (u64)CMDQ_JUMP_BY_PA << 32 |
+ (task->pa_base >> task->cmdq->shift_pa);
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
@@ -262,7 +272,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
if (list_empty(&thread->task_busy_list)) {
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+ clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}
}
@@ -307,7 +317,7 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- clk_unprepare(cmdq->clock);
+ clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
return 0;
}
@@ -316,7 +326,7 @@ static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
- WARN_ON(clk_prepare(cmdq->clock) < 0);
+ WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
cmdq->suspended = false;
return 0;
}
@@ -325,8 +335,7 @@ static int cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- clk_unprepare(cmdq->clock);
-
+ clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
return 0;
}
@@ -352,7 +361,8 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_enable(cmdq->clock) < 0);
+ WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
+
/*
* The thread reset will clear thread related register to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -424,7 +434,8 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+ clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
done:
/*
* The thread->task_busy_list empty means thread already disable. The
@@ -469,7 +480,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+ clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
@@ -518,6 +529,10 @@ static int cmdq_probe(struct platform_device *pdev)
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
+ struct device_node *phandle = dev->of_node;
+ struct device_node *node;
+ int alias_id = 0;
+ char clk_name[4] = "gce";
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -540,6 +555,8 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->thread_nr = plat_data->thread_nr;
cmdq->shift_pa = plat_data->shift;
+ cmdq->control_by_sw = plat_data->control_by_sw;
+ cmdq->gce_num = plat_data->gce_num;
cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
"mtk_cmdq", cmdq);
@@ -551,10 +568,28 @@ static int cmdq_probe(struct platform_device *pdev)
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
- cmdq->clock = devm_clk_get(dev, "gce");
- if (IS_ERR(cmdq->clock)) {
- dev_err(dev, "failed to get gce clk\n");
- return PTR_ERR(cmdq->clock);
+ if (cmdq->gce_num > 1) {
+ for_each_child_of_node(phandle->parent, node) {
+ char clk_id[8];
+
+ alias_id = of_alias_get_id(node, clk_name);
+ if (alias_id < cmdq->gce_num) {
+ snprintf(clk_id, sizeof(clk_id), "%s%d", clk_name, alias_id);
+ cmdq->clocks[alias_id].id = clk_id;
+ cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
+ if (IS_ERR(cmdq->clocks[alias_id].clk)) {
+ dev_err(dev, "failed to get gce clk: %d\n", alias_id);
+ return PTR_ERR(cmdq->clocks[alias_id].clk);
+ }
+ }
+ }
+ } else {
+ cmdq->clocks[alias_id].id = clk_name;
+ cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(cmdq->clocks[alias_id].clk)) {
+ dev_err(dev, "failed to get gce clk\n");
+ return PTR_ERR(cmdq->clocks[alias_id].clk);
+ }
}
cmdq->mbox.dev = dev;
@@ -590,7 +625,8 @@ static int cmdq_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, cmdq);
- WARN_ON(clk_prepare(cmdq->clock) < 0);
+
+ WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
cmdq_init(cmdq);
@@ -602,14 +638,47 @@ static const struct dev_pm_ops cmdq_pm_ops = {
.resume = cmdq_resume,
};
-static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
-static const struct gce_plat gce_plat_v3 = {.thread_nr = 24};
-static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};
+static const struct gce_plat gce_plat_v2 = {
+ .thread_nr = 16,
+ .shift = 0,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v3 = {
+ .thread_nr = 24,
+ .shift = 0,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v4 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v5 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .gce_num = 2
+};
+
+static const struct gce_plat gce_plat_v6 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = false,
+ .gce_num = 2
+};
static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
+ {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
+ {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
{}
};
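
The mtk-cmdq conversion above moves from a single struct clk to a clk_bulk_data array so that SoCs with two GCE blocks can manage both clocks in one call. Below is a minimal sketch of the clk_bulk consumer pattern the driver now relies on; the function name example_get_gce_clocks() and the "gce0"/"gce1" IDs are assumptions for the example (the patch itself fills the array via of_clk_get() and device-tree aliases).

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch only: fetch and enable a fixed pair of clocks as one bulk set. */
static int example_get_gce_clocks(struct device *dev,
				  struct clk_bulk_data *clks)
{
	int ret;

	clks[0].id = "gce0";	/* assumed clock IDs */
	clks[1].id = "gce1";

	/* Looks up every clock by ID and fails if any one is missing. */
	ret = devm_clk_bulk_get(dev, 2, clks);
	if (ret)
		return ret;

	/* Rolls back already-enabled clocks if a later one fails. */
	return clk_bulk_prepare_enable(2, clks);
}
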
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 03bdc96dc457..82ccfaf14b24 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -163,6 +163,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
@@ -173,6 +174,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &sm6125_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &sdm660_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
};
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 584700cd1585..f1d4f4679b17 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -277,6 +277,7 @@ static struct platform_driver qcom_ipcc_driver = {
.driver = {
.name = "qcom-ipcc",
.of_match_table = qcom_ipcc_of_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 3fd69ab12a8e..2c5edfbd7711 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -136,7 +136,8 @@ static void dm_ima_measure_data(const char *event_name, const void *buf, size_t
if (noio)
noio_flag = memalloc_noio_save();
- ima_measure_critical_data(DM_NAME, event_name, buf, buf_len, false);
+ ima_measure_critical_data(DM_NAME, event_name, buf, buf_len,
+ false, NULL, 0);
if (noio)
memalloc_noio_restore(noio_flag);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b03eabc1ed7c..2111daaacaba 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -809,14 +809,9 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data, id;
- bool rc;
+ int blocksize = *(int *) data;
- id = dax_read_lock();
- rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
- dax_read_unlock(id);
-
- return rc;
+ return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
}
/* Check devices support synchronous DAX */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 84e9145b1714..a011d09cb0fa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -654,7 +654,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
return 0;
}
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index a3ce5500d355..0f08c05333ea 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/units.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
@@ -64,7 +65,6 @@
/* Test pattern control */
#define OV02A10_REG_TEST_PATTERN 0xb6
-#define HZ_PER_MHZ 1000000L
#define OV02A10_LINK_FREQ_390MHZ (390 * HZ_PER_MHZ)
#define OV02A10_ECLK_FREQ (24 * HZ_PER_MHZ)
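
The ov02a10 hunk above drops a driver-local HZ_PER_MHZ macro in favour of the shared definition from <linux/units.h>. A minimal sketch of the header's use; example_link_freq is an assumed name:

#include <linux/types.h>
#include <linux/units.h>

/* HZ_PER_MHZ is 1000000UL; <linux/units.h> also provides HZ_PER_KHZ and
 * similar conversion constants, so drivers need not define their own.
 */
static const u64 example_link_freq = 390 * HZ_PER_MHZ;	/* 390 MHz in Hz */
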
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 01bb42f0ca0b..ca0edab91aeb 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2199,5 +2199,33 @@ config MFD_INTEL_M10_BMC
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_RSMU_I2C
+ tristate "Renesas Synchronization Management Unit with I2C"
+ depends on I2C && OF
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the Renesas Synchronization Management Unit, such as
+ Clockmatrix and 82P33XXX series. This option supports I2C as
+ the control interface.
+
+ This driver provides common support for accessing the device.
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config MFD_RSMU_SPI
+ tristate "Renesas Synchronization Management Unit with SPI"
+ depends on SPI && OF
+ select MFD_CORE
+ select REGMAP_SPI
+ help
+ Support for the Renesas Synchronization Management Unit, such as
+ Clockmatrix and 82P33XXX series. This option supports SPI as
+ the control interface.
+
+ This driver provides common support for accessing the device.
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 570b9ffb34d0..2ba6646e874c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -273,3 +273,8 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
+
+rsmu-i2c-objs := rsmu_core.o rsmu_i2c.o
+rsmu-spi-objs := rsmu_core.o rsmu_spi.o
+obj-$(CONFIG_MFD_RSMU_I2C) += rsmu-i2c.o
+obj-$(CONFIG_MFD_RSMU_SPI) += rsmu-spi.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30489670ea52..cca0aac26148 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -485,7 +485,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
line += 1;
- handle_nested_irq(irq_create_mapping(ab8500->domain, line));
+ handle_nested_irq(irq_find_mapping(ab8500->domain, line));
}
return 0;
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 4145a38b3890..8161a5dc68e8 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
+ regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
- regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
};
@@ -699,6 +700,18 @@ static const struct resource axp288_charger_resources[] = {
DEFINE_RES_IRQ(AXP288_IRQ_CBTO),
};
+static const char * const axp288_fuel_gauge_suppliers[] = { "axp288_charger" };
+
+static const struct property_entry axp288_fuel_gauge_properties[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", axp288_fuel_gauge_suppliers),
+ { }
+};
+
+static const struct software_node axp288_fuel_gauge_sw_node = {
+ .name = "axp288_fuel_gauge",
+ .properties = axp288_fuel_gauge_properties,
+};
+
static const struct mfd_cell axp288_cells[] = {
{
.name = "axp288_adc",
@@ -716,6 +729,7 @@ static const struct mfd_cell axp288_cells[] = {
.name = "axp288_fuel_gauge",
.num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
.resources = axp288_fuel_gauge_resources,
+ .swnode = &axp288_fuel_gauge_sw_node,
}, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp288_power_button_resources),
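
The axp20x hunks attach a software node carrying a "supplied-from" string array to the fuel-gauge cell. A hedged sketch of the consumer side, assuming a child platform driver with the made-up name example_fg_probe(), reading the property through the unified device-property API:

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_fg_probe(struct platform_device *pdev)
{
	const char *suppliers[4];
	int n;

	/* Reads up to 4 strings from the swnode-provided property. */
	n = device_property_read_string_array(&pdev->dev, "supplied-from",
					      suppliers,
					      ARRAY_SIZE(suppliers));
	if (n < 0)
		return n;	/* property missing or malformed */

	dev_info(&pdev->dev, "%d supplier(s), first is %s\n", n, suppliers[0]);
	return 0;
}
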
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/db8500-prcmu-regs.h
index 75fd1069372c..75fd1069372c 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/db8500-prcmu-regs.h
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 287da20f1231..c1d3e7c116cf 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -37,7 +37,7 @@
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/platform_data/ux500_wdt.h>
-#include "dbx500-prcmu-regs.h"
+#include "db8500-prcmu-regs.h"
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
}
static const unsigned long db8500_armss_freqs[] = {
- 200000000,
- 400000000,
- 800000000,
+ 199680000,
+ 399360000,
+ 798720000,
998400000
};
/* The DB8520 has slightly higher ARMSS max frequency */
static const unsigned long db8520_armss_freqs[] = {
- 200000000,
- 400000000,
- 800000000,
+ 199680000,
+ 399360000,
+ 798720000,
1152000000
};
-
-
static long round_armss_rate(unsigned long rate)
{
unsigned long freq = 0;
@@ -2567,14 +2565,16 @@ static char *fw_project_name(u32 project)
return "U8500 C4";
case PRCMU_FW_PROJECT_U9500_MBL:
return "U9500 MBL";
- case PRCMU_FW_PROJECT_U8500_MBL:
- return "U8500 MBL";
+ case PRCMU_FW_PROJECT_U8500_SSG1:
+ return "U8500 Samsung 1";
case PRCMU_FW_PROJECT_U8500_MBL2:
return "U8500 MBL2";
case PRCMU_FW_PROJECT_U8520:
return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U8500_SSG2:
+ return "U8500 Samsung 2";
case PRCMU_FW_PROJECT_U8420_SYSCLK:
return "U8420-sysclk";
case PRCMU_FW_PROJECT_U9540:
@@ -2951,14 +2951,13 @@ static const struct mfd_cell common_prcmu_devs[] = {
.pdata_size = sizeof(db8500_wdt_pdata),
.id = -1,
},
+ MFD_CELL_NAME("db8500-cpuidle"),
};
static const struct mfd_cell db8500_prcmu_devs[] = {
MFD_CELL_OF("db8500-prcmu-regulators", NULL,
&db8500_regulators, sizeof(db8500_regulators), 0,
"stericsson,db8500-prcmu-regulator"),
- MFD_CELL_OF("cpuidle-dbx500",
- NULL, NULL, 0, 0, "stericsson,cpuidle-dbx500"),
MFD_CELL_OF("db8500-thermal",
NULL, NULL, 0, 0, "stericsson,db8500-thermal"),
};
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 1f396039d58f..3f1d976eb67c 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -89,6 +89,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.swnode = &apl_i2c_node,
};
+static const struct intel_lpss_platform_info cnl_i2c_info = {
+ .clk_rate = 216000000,
+ .swnode = &spt_i2c_node,
+};
+
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
/* SPT */
{ "INT3440", (kernel_ulong_t)&spt_info },
@@ -102,6 +107,19 @@ static const struct acpi_device_id intel_lpss_acpi_ids[] = {
{ "INT3448", (kernel_ulong_t)&spt_uart_info },
{ "INT3449", (kernel_ulong_t)&spt_uart_info },
{ "INT344A", (kernel_ulong_t)&spt_uart_info },
+ /* CNL */
+ { "INT34B0", (kernel_ulong_t)&spt_info },
+ { "INT34B1", (kernel_ulong_t)&spt_info },
+ { "INT34B2", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B3", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B4", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B5", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B6", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B7", (kernel_ulong_t)&cnl_i2c_info },
+ { "INT34B8", (kernel_ulong_t)&spt_uart_info },
+ { "INT34B9", (kernel_ulong_t)&spt_uart_info },
+ { "INT34BA", (kernel_ulong_t)&spt_uart_info },
+ { "INT34BC", (kernel_ulong_t)&spt_info },
/* BXT */
{ "80860AAC", (kernel_ulong_t)&bxt_i2c_info },
{ "80860ABC", (kernel_ulong_t)&bxt_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index a9bf10bee796..0e15afc39f54 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -301,7 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
snprintf(name, sizeof(name), "%s-div", devname);
tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
- 0, lpss->priv, 1, 15, 16, 15, 0,
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+ lpss->priv, 1, 15, 16, 15, 0,
NULL);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
index 1a9bfb7f48cd..8db3bcf5fccc 100644
--- a/drivers/mfd/intel-m10-bmc.c
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -15,7 +15,8 @@
enum m10bmc_type {
M10_N3000,
- M10_D5005
+ M10_D5005,
+ M10_N5010,
};
static struct mfd_cell m10bmc_d5005_subdevs[] = {
@@ -28,6 +29,10 @@ static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-secure" },
};
+static struct mfd_cell m10bmc_n5010_subdevs[] = {
+ { .name = "n5010bmc-hwmon" },
+};
+
static const struct regmap_range m10bmc_regmap_range[] = {
regmap_reg_range(M10BMC_LEGACY_BUILD_VER, M10BMC_LEGACY_BUILD_VER),
regmap_reg_range(M10BMC_SYS_BASE, M10BMC_SYS_END),
@@ -192,6 +197,10 @@ static int intel_m10_bmc_spi_probe(struct spi_device *spi)
cells = m10bmc_d5005_subdevs;
n_cell = ARRAY_SIZE(m10bmc_d5005_subdevs);
break;
+ case M10_N5010:
+ cells = m10bmc_n5010_subdevs;
+ n_cell = ARRAY_SIZE(m10bmc_n5010_subdevs);
+ break;
default:
return -ENODEV;
}
@@ -207,6 +216,7 @@ static int intel_m10_bmc_spi_probe(struct spi_device *spi)
static const struct spi_device_id m10bmc_spi_id[] = {
{ "m10-n3000", M10_N3000 },
{ "m10-d5005", M10_D5005 },
+ { "m10-n5010", M10_N5010 },
{ }
};
MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 01935ae4e9e1..9b9c76bd067b 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -17,7 +17,6 @@
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
-#include <linux/platform_data/gpio-dwapb.h>
#include <linux/property.h>
/* PCI BAR for register base address */
@@ -28,15 +27,6 @@
#define MFD_ACPI_MATCH_GPIO 0ULL
#define MFD_ACPI_MATCH_I2C 1ULL
-/* The base GPIO number under GPIOLIB framework */
-#define INTEL_QUARK_MFD_GPIO_BASE 8
-
-/* The default number of South-Cluster GPIO on Quark. */
-#define INTEL_QUARK_MFD_NGPIO 8
-
-/* The DesignWare GPIO ports on Quark. */
-#define INTEL_QUARK_GPIO_NPORTS 1
-
#define INTEL_QUARK_IORES_MEM 0
#define INTEL_QUARK_IORES_IRQ 1
@@ -111,12 +101,38 @@ static struct resource intel_quark_gpio_res[] = {
[INTEL_QUARK_IORES_MEM] = {
.flags = IORESOURCE_MEM,
},
+ [INTEL_QUARK_IORES_IRQ] = {
+ .flags = IORESOURCE_IRQ,
+ },
};
static struct mfd_cell_acpi_match intel_quark_acpi_match_gpio = {
.adr = MFD_ACPI_MATCH_GPIO,
};
+static const struct software_node intel_quark_gpio_controller_node = {
+ .name = "intel-quark-gpio-controller",
+};
+
+static const struct property_entry intel_quark_gpio_portA_properties[] = {
+ PROPERTY_ENTRY_U32("reg", 0),
+ PROPERTY_ENTRY_U32("snps,nr-gpios", 8),
+ PROPERTY_ENTRY_U32("gpio-base", 8),
+ { }
+};
+
+static const struct software_node intel_quark_gpio_portA_node = {
+ .name = "portA",
+ .parent = &intel_quark_gpio_controller_node,
+ .properties = intel_quark_gpio_portA_properties,
+};
+
+static const struct software_node *intel_quark_gpio_node_group[] = {
+ &intel_quark_gpio_controller_node,
+ &intel_quark_gpio_portA_node,
+ NULL
+};
+
static struct mfd_cell intel_quark_mfd_cells[] = {
[MFD_I2C_BAR] = {
.id = MFD_I2C_BAR,
@@ -203,35 +219,19 @@ static int intel_quark_gpio_setup(struct pci_dev *pdev)
{
struct mfd_cell *cell = &intel_quark_mfd_cells[MFD_GPIO_BAR];
struct resource *res = intel_quark_gpio_res;
- struct dwapb_platform_data *pdata;
- struct device *dev = &pdev->dev;
+ int ret;
res[INTEL_QUARK_IORES_MEM].start = pci_resource_start(pdev, MFD_GPIO_BAR);
res[INTEL_QUARK_IORES_MEM].end = pci_resource_end(pdev, MFD_GPIO_BAR);
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- /* For intel quark x1000, it has only one port: portA */
- pdata->nports = INTEL_QUARK_GPIO_NPORTS;
- pdata->properties = devm_kcalloc(dev, pdata->nports,
- sizeof(*pdata->properties),
- GFP_KERNEL);
- if (!pdata->properties)
- return -ENOMEM;
-
- /* Set the properties for portA */
- pdata->properties->fwnode = NULL;
- pdata->properties->idx = 0;
- pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
- pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
- pdata->properties->irq[0] = pci_irq_vector(pdev, 0);
- pdata->properties->irq_shared = true;
+ res[INTEL_QUARK_IORES_IRQ].start = pci_irq_vector(pdev, 0);
+ res[INTEL_QUARK_IORES_IRQ].end = pci_irq_vector(pdev, 0);
- cell->platform_data = pdata;
- cell->pdata_size = sizeof(*pdata);
+ ret = software_node_register_node_group(intel_quark_gpio_node_group);
+ if (ret)
+ return ret;
+ cell->swnode = &intel_quark_gpio_controller_node;
return 0;
}
@@ -274,10 +274,12 @@ static int intel_quark_mfd_probe(struct pci_dev *pdev,
ARRAY_SIZE(intel_quark_mfd_cells), NULL, 0,
NULL);
if (ret)
- goto err_free_irq_vectors;
+ goto err_unregister_gpio_node_group;
return 0;
+err_unregister_gpio_node_group:
+ software_node_unregister_node_group(intel_quark_gpio_node_group);
err_free_irq_vectors:
pci_free_irq_vectors(pdev);
err_unregister_i2c_clk:
@@ -288,6 +290,7 @@ err_unregister_i2c_clk:
static void intel_quark_mfd_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
+ software_node_unregister_node_group(intel_quark_gpio_node_group);
pci_free_irq_vectors(pdev);
intel_quark_unregister_i2c_clk(&pdev->dev);
}
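
The Quark conversion above hinges on software_node_register_node_group(); the essential point is symmetric cleanup, which is why both the probe error path and remove gain software_node_unregister_node_group() calls. A minimal sketch of that lifecycle, reusing the node group and cell array from the patch (example_register() is an assumed name, and error handling is trimmed):

#include <linux/mfd/core.h>
#include <linux/property.h>

static int example_register(struct device *dev)
{
	int ret;

	ret = software_node_register_node_group(intel_quark_gpio_node_group);
	if (ret)
		return ret;

	ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, intel_quark_mfd_cells,
			      ARRAY_SIZE(intel_quark_mfd_cells), NULL, 0, NULL);
	if (ret)
		/* Node groups are not devm-managed; undo by hand on error. */
		software_node_unregister_node_group(intel_quark_gpio_node_group);

	return ret;
}
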
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 3bbb29a7e7a5..f10e53187f67 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -489,6 +489,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_DH89XXCC] = {
.name = "DH89xxCC",
.iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
},
[LPC_PPT] = {
.name = "Panther Point",
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 428a526cbe86..9ab9adce06fd 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -22,7 +22,7 @@
#define SMBASE 0x40
#define SMBUS_IO_SIZE 64
-#define GPIOBASE 0x44
+#define GPIO_BASE 0x44
#define GPIO_IO_SIZE 64
#define GPIO_IO_SIZE_CENTERTON 128
@@ -145,7 +145,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret == 0)
cells++;
- ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
+ ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
info->io_size_gpio,
id->device, &lpc_sch_cells[cells]);
if (ret < 0)
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index e628953548ce..6eaa6775b888 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -319,18 +319,18 @@ static const struct resource mt6360_regulator_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OC_EVT, "buck2_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OV_EVT, "buck2_ov_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_UV_EVT, "buck2_uv_evt"),
- DEFINE_RES_IRQ_NAMED(MT6360_LDO6_OC_EVT, "ldo6_oc_evt"),
- DEFINE_RES_IRQ_NAMED(MT6360_LDO7_OC_EVT, "ldo7_oc_evt"),
- DEFINE_RES_IRQ_NAMED(MT6360_LDO6_PGB_EVT, "ldo6_pgb_evt"),
- DEFINE_RES_IRQ_NAMED(MT6360_LDO7_PGB_EVT, "ldo7_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO1_OC_EVT, "ldo1_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO2_OC_EVT, "ldo2_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO3_OC_EVT, "ldo3_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO5_OC_EVT, "ldo5_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_OC_EVT, "ldo6_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_OC_EVT, "ldo7_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO1_PGB_EVT, "ldo1_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO2_PGB_EVT, "ldo2_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO3_PGB_EVT, "ldo3_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO5_PGB_EVT, "ldo5_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_PGB_EVT, "ldo6_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_PGB_EVT, "ldo7_pgb_evt"),
};
static const struct mfd_cell mt6360_devs[] = {
diff --git a/drivers/mfd/rsmu.h b/drivers/mfd/rsmu.h
new file mode 100644
index 000000000000..bb88597d189f
--- /dev/null
+++ b/drivers/mfd/rsmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __RSMU_MFD_H
+#define __RSMU_MFD_H
+
+#include <linux/mfd/rsmu.h>
+
+int rsmu_core_init(struct rsmu_ddata *rsmu);
+void rsmu_core_exit(struct rsmu_ddata *rsmu);
+
+#endif /* __RSMU_MFD_H */
diff --git a/drivers/mfd/rsmu_core.c b/drivers/mfd/rsmu_core.c
new file mode 100644
index 000000000000..29437fd0bd5b
--- /dev/null
+++ b/drivers/mfd/rsmu_core.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Core driver for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "rsmu.h"
+
+enum {
+ RSMU_PHC = 0,
+ RSMU_CDEV = 1,
+ RSMU_N_DEVS = 2,
+};
+
+static struct mfd_cell rsmu_cm_devs[] = {
+ [RSMU_PHC] = {
+ .name = "8a3400x-phc",
+ },
+ [RSMU_CDEV] = {
+ .name = "8a3400x-cdev",
+ },
+};
+
+static struct mfd_cell rsmu_sabre_devs[] = {
+ [RSMU_PHC] = {
+ .name = "82p33x1x-phc",
+ },
+ [RSMU_CDEV] = {
+ .name = "82p33x1x-cdev",
+ },
+};
+
+static struct mfd_cell rsmu_sl_devs[] = {
+ [RSMU_PHC] = {
+ .name = "8v19n85x-phc",
+ },
+ [RSMU_CDEV] = {
+ .name = "8v19n85x-cdev",
+ },
+};
+
+int rsmu_core_init(struct rsmu_ddata *rsmu)
+{
+ struct mfd_cell *cells;
+ int ret;
+
+ switch (rsmu->type) {
+ case RSMU_CM:
+ cells = rsmu_cm_devs;
+ break;
+ case RSMU_SABRE:
+ cells = rsmu_sabre_devs;
+ break;
+ case RSMU_SL:
+ cells = rsmu_sl_devs;
+ break;
+ default:
+ dev_err(rsmu->dev, "Unsupported RSMU device type: %d\n", rsmu->type);
+ return -ENODEV;
+ }
+
+ mutex_init(&rsmu->lock);
+
+ ret = devm_mfd_add_devices(rsmu->dev, PLATFORM_DEVID_AUTO, cells,
+ RSMU_N_DEVS, NULL, 0, NULL);
+ if (ret < 0)
+ dev_err(rsmu->dev, "Failed to register sub-devices: %d\n", ret);
+
+ return ret;
+}
+
+void rsmu_core_exit(struct rsmu_ddata *rsmu)
+{
+ mutex_destroy(&rsmu->lock);
+}
+
+MODULE_DESCRIPTION("Renesas SMU core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
new file mode 100644
index 000000000000..dc001c9791c1
--- /dev/null
+++ b/drivers/mfd/rsmu_i2c.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * I2C driver for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "rsmu.h"
+
+/*
+ * 16-bit register address: the lower 8 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define RSMU_CM_PAGE_ADDR 0xFD
+#define RSMU_CM_PAGE_WINDOW 256
+
+/*
+ * 15-bit register address: the lower 7 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define RSMU_SABRE_PAGE_ADDR 0x7F
+#define RSMU_SABRE_PAGE_WINDOW 128
+
+static const struct regmap_range_cfg rsmu_cm_range_cfg[] = {
+ {
+ .range_min = 0,
+ .range_max = 0xD000,
+ .selector_reg = RSMU_CM_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = RSMU_CM_PAGE_WINDOW,
+ }
+};
+
+static const struct regmap_range_cfg rsmu_sabre_range_cfg[] = {
+ {
+ .range_min = 0,
+ .range_max = 0x400,
+ .selector_reg = RSMU_SABRE_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = RSMU_SABRE_PAGE_WINDOW,
+ }
+};
+
+static bool rsmu_cm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RSMU_CM_PAGE_ADDR:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool rsmu_sabre_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RSMU_SABRE_PAGE_ADDR:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config rsmu_cm_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xD000,
+ .ranges = rsmu_cm_range_cfg,
+ .num_ranges = ARRAY_SIZE(rsmu_cm_range_cfg),
+ .volatile_reg = rsmu_cm_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+};
+
+static const struct regmap_config rsmu_sabre_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x400,
+ .ranges = rsmu_sabre_range_cfg,
+ .num_ranges = ARRAY_SIZE(rsmu_sabre_range_cfg),
+ .volatile_reg = rsmu_sabre_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+};
+
+static const struct regmap_config rsmu_sl_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = 0x339,
+ .cache_type = REGCACHE_NONE,
+ .can_multi_write = true,
+};
+
+static int rsmu_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct regmap_config *cfg;
+ struct rsmu_ddata *rsmu;
+ int ret;
+
+ rsmu = devm_kzalloc(&client->dev, sizeof(*rsmu), GFP_KERNEL);
+ if (!rsmu)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, rsmu);
+
+ rsmu->dev = &client->dev;
+ rsmu->type = (enum rsmu_type)id->driver_data;
+
+ switch (rsmu->type) {
+ case RSMU_CM:
+ cfg = &rsmu_cm_regmap_config;
+ break;
+ case RSMU_SABRE:
+ cfg = &rsmu_sabre_regmap_config;
+ break;
+ case RSMU_SL:
+ cfg = &rsmu_sl_regmap_config;
+ break;
+ default:
+ dev_err(rsmu->dev, "Unsupported RSMU device type: %d\n", rsmu->type);
+ return -ENODEV;
+ }
+ rsmu->regmap = devm_regmap_init_i2c(client, cfg);
+ if (IS_ERR(rsmu->regmap)) {
+ ret = PTR_ERR(rsmu->regmap);
+ dev_err(rsmu->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ return rsmu_core_init(rsmu);
+}
+
+static int rsmu_i2c_remove(struct i2c_client *client)
+{
+ struct rsmu_ddata *rsmu = i2c_get_clientdata(client);
+
+ rsmu_core_exit(rsmu);
+
+ return 0;
+}
+
+static const struct i2c_device_id rsmu_i2c_id[] = {
+ { "8a34000", RSMU_CM },
+ { "8a34001", RSMU_CM },
+ { "82p33810", RSMU_SABRE },
+ { "82p33811", RSMU_SABRE },
+ { "8v19n850", RSMU_SL },
+ { "8v19n851", RSMU_SL },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rsmu_i2c_id);
+
+static const struct of_device_id rsmu_i2c_of_match[] = {
+ { .compatible = "idt,8a34000", .data = (void *)RSMU_CM },
+ { .compatible = "idt,8a34001", .data = (void *)RSMU_CM },
+ { .compatible = "idt,82p33810", .data = (void *)RSMU_SABRE },
+ { .compatible = "idt,82p33811", .data = (void *)RSMU_SABRE },
+ { .compatible = "idt,8v19n850", .data = (void *)RSMU_SL },
+ { .compatible = "idt,8v19n851", .data = (void *)RSMU_SL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rsmu_i2c_of_match);
+
+static struct i2c_driver rsmu_i2c_driver = {
+ .driver = {
+ .name = "rsmu-i2c",
+ .of_match_table = of_match_ptr(rsmu_i2c_of_match),
+ },
+ .probe = rsmu_i2c_probe,
+ .remove = rsmu_i2c_remove,
+ .id_table = rsmu_i2c_id,
+};
+
+static int __init rsmu_i2c_init(void)
+{
+ return i2c_add_driver(&rsmu_i2c_driver);
+}
+subsys_initcall(rsmu_i2c_init);
+
+static void __exit rsmu_i2c_exit(void)
+{
+ i2c_del_driver(&rsmu_i2c_driver);
+}
+module_exit(rsmu_i2c_exit);
+
+MODULE_DESCRIPTION("Renesas SMU I2C driver");
+MODULE_LICENSE("GPL");
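
The regmap_range_cfg above lets regmap split each 16-bit virtual register address into a page-select write plus an 8-bit in-window access; because the page register is declared non-volatile, the regcache suppresses redundant page-select writes. A sketch of the split for the ClockMatrix ("CM") layout, using the illustrative address 0xC024:

#include <linux/types.h>

/* For RSMU_CM: window_len = 256, selector register = 0xFD. */
static void rsmu_cm_split_example(void)
{
	unsigned int reg = 0xC024;	/* virtual 16-bit register address */
	u8 page = (reg >> 8) & 0xff;	/* 0xC0, written to register 0xFD */
	u8 offset = reg & 0xff;		/* 0x24, the I2C offset address byte */

	(void)page;
	(void)offset;
}
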
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
new file mode 100644
index 000000000000..fec2b4ec477c
--- /dev/null
+++ b/drivers/mfd/rsmu_spi.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SPI driver for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include "rsmu.h"
+
+#define RSMU_CM_PAGE_ADDR 0x7C
+#define RSMU_SABRE_PAGE_ADDR 0x7F
+#define RSMU_HIGHER_ADDR_MASK 0xFF80
+#define RSMU_HIGHER_ADDR_SHIFT 7
+#define RSMU_LOWER_ADDR_MASK 0x7F
+
+static int rsmu_read_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes)
+{
+ struct spi_device *client = to_spi_device(rsmu->dev);
+ struct spi_transfer xfer = {0};
+ struct spi_message msg;
+ u8 cmd[256] = {0};
+ u8 rsp[256] = {0};
+ int ret;
+
+ cmd[0] = reg | 0x80;
+ xfer.rx_buf = rsp;
+ xfer.len = bytes + 1;
+ xfer.tx_buf = cmd;
+ xfer.bits_per_word = client->bits_per_word;
+ xfer.speed_hz = client->max_speed_hz;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ /*
+ * 4-wire SPI is a shift register, so for every byte you send,
+ * you get one back at the same time. Example read from 0xC024,
+ * you get one back at the same time. Example: read from 0xC024,
+ * which has a value of 0x2D
+ * MOSI:
+ * 7C 00 C0 #Set page register
+ * A4 00 #MSB is set, so this is read command
+ * MISO:
+ * XX 2D #XX is a dummy byte from sending A4 and we
+ * need to throw it away
+ */
+ ret = spi_sync(client, &msg);
+ if (ret >= 0)
+ memcpy(buf, &rsp[1], xfer.len-1);
+
+ return ret;
+}
+
+static int rsmu_write_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes)
+{
+ struct spi_device *client = to_spi_device(rsmu->dev);
+ struct spi_transfer xfer = {0};
+ struct spi_message msg;
+ u8 cmd[256] = {0};
+
+ cmd[0] = reg;
+ memcpy(&cmd[1], buf, bytes);
+
+ xfer.len = bytes + 1;
+ xfer.tx_buf = cmd;
+ xfer.bits_per_word = client->bits_per_word;
+ xfer.speed_hz = client->max_speed_hz;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(client, &msg);
+}
+
+/*
+ * 1-byte (1B) offset addressing:
+ * 16-bit register address: the lower 7 bits of the register address come
+ * from the offset addr byte and the upper 9 bits come from the page register.
+ */
+static int rsmu_write_page_register(struct rsmu_ddata *rsmu, u16 reg)
+{
+ u8 page_reg;
+ u8 buf[2];
+ u16 bytes;
+ u16 page;
+ int err;
+
+ switch (rsmu->type) {
+ case RSMU_CM:
+ page_reg = RSMU_CM_PAGE_ADDR;
+ page = reg & RSMU_HIGHER_ADDR_MASK;
+ buf[0] = (u8)(page & 0xff);
+ buf[1] = (u8)((page >> 8) & 0xff);
+ bytes = 2;
+ break;
+ case RSMU_SABRE:
+ page_reg = RSMU_SABRE_PAGE_ADDR;
+ page = reg >> RSMU_HIGHER_ADDR_SHIFT;
+ buf[0] = (u8)(page & 0xff);
+ bytes = 1;
+ break;
+ default:
+ dev_err(rsmu->dev, "Unsupported RSMU device type: %d\n", rsmu->type);
+ return -ENODEV;
+ }
+
+ /* Simply return if we are on the same page */
+ if (rsmu->page == page)
+ return 0;
+
+ err = rsmu_write_device(rsmu, page_reg, buf, bytes);
+ if (err)
+ dev_err(rsmu->dev, "Failed to set page offset 0x%x\n", page);
+ else
+ /* Remember the last page */
+ rsmu->page = page;
+
+ return err;
+}
+
+static int rsmu_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rsmu_ddata *rsmu = spi_get_drvdata((struct spi_device *)context);
+ u8 addr = (u8)(reg & RSMU_LOWER_ADDR_MASK);
+ int err;
+
+ err = rsmu_write_page_register(rsmu, reg);
+ if (err)
+ return err;
+
+ err = rsmu_read_device(rsmu, addr, (u8 *)val, 1);
+ if (err)
+ dev_err(rsmu->dev, "Failed to read offset address 0x%x\n", addr);
+
+ return err;
+}
+
+static int rsmu_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rsmu_ddata *rsmu = spi_get_drvdata((struct spi_device *)context);
+ u8 addr = (u8)(reg & RSMU_LOWER_ADDR_MASK);
+ u8 data = (u8)val;
+ int err;
+
+ err = rsmu_write_page_register(rsmu, reg);
+ if (err)
+ return err;
+
+ err = rsmu_write_device(rsmu, addr, &data, 1);
+ if (err)
+ dev_err(rsmu->dev,
+ "Failed to write offset address 0x%x\n", addr);
+
+ return err;
+}
+
+static const struct regmap_config rsmu_cm_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xD000,
+ .reg_read = rsmu_reg_read,
+ .reg_write = rsmu_reg_write,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_config rsmu_sabre_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x400,
+ .reg_read = rsmu_reg_read,
+ .reg_write = rsmu_reg_write,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int rsmu_spi_probe(struct spi_device *client)
+{
+ const struct spi_device_id *id = spi_get_device_id(client);
+ const struct regmap_config *cfg;
+ struct rsmu_ddata *rsmu;
+ int ret;
+
+ rsmu = devm_kzalloc(&client->dev, sizeof(*rsmu), GFP_KERNEL);
+ if (!rsmu)
+ return -ENOMEM;
+
+ spi_set_drvdata(client, rsmu);
+
+ rsmu->dev = &client->dev;
+ rsmu->type = (enum rsmu_type)id->driver_data;
+
+ /* Initialize regmap */
+ switch (rsmu->type) {
+ case RSMU_CM:
+ cfg = &rsmu_cm_regmap_config;
+ break;
+ case RSMU_SABRE:
+ cfg = &rsmu_sabre_regmap_config;
+ break;
+ default:
+ dev_err(rsmu->dev, "Unsupported RSMU device type: %d\n", rsmu->type);
+ return -ENODEV;
+ }
+
+ rsmu->regmap = devm_regmap_init(&client->dev, NULL, client, cfg);
+ if (IS_ERR(rsmu->regmap)) {
+ ret = PTR_ERR(rsmu->regmap);
+ dev_err(rsmu->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ return rsmu_core_init(rsmu);
+}
+
+static int rsmu_spi_remove(struct spi_device *client)
+{
+ struct rsmu_ddata *rsmu = spi_get_drvdata(client);
+
+ rsmu_core_exit(rsmu);
+
+ return 0;
+}
+
+static const struct spi_device_id rsmu_spi_id[] = {
+ { "8a34000", RSMU_CM },
+ { "8a34001", RSMU_CM },
+ { "82p33810", RSMU_SABRE },
+ { "82p33811", RSMU_SABRE },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, rsmu_spi_id);
+
+static const struct of_device_id rsmu_spi_of_match[] = {
+ { .compatible = "idt,8a34000", .data = (void *)RSMU_CM },
+ { .compatible = "idt,8a34001", .data = (void *)RSMU_CM },
+ { .compatible = "idt,82p33810", .data = (void *)RSMU_SABRE },
+ { .compatible = "idt,82p33811", .data = (void *)RSMU_SABRE },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rsmu_spi_of_match);
+
+static struct spi_driver rsmu_spi_driver = {
+ .driver = {
+ .name = "rsmu-spi",
+ .of_match_table = of_match_ptr(rsmu_spi_of_match),
+ },
+ .probe = rsmu_spi_probe,
+ .remove = rsmu_spi_remove,
+ .id_table = rsmu_spi_id,
+};
+
+static int __init rsmu_spi_init(void)
+{
+ return spi_register_driver(&rsmu_spi_driver);
+}
+subsys_initcall(rsmu_spi_init);
+
+static void __exit rsmu_spi_exit(void)
+{
+ spi_unregister_driver(&rsmu_spi_driver);
+}
+module_exit(rsmu_spi_exit);
+
+MODULE_DESCRIPTION("Renesas SMU SPI driver");
+MODULE_LICENSE("GPL");
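
Unlike the I2C variant, the SPI driver pages by hand in rsmu_write_page_register(), and the two families split the address differently: CM keeps the upper nine bits as a two-byte page value, while SABRE shifts the address right by seven and writes a single byte. A worked sketch with the illustrative address 0x01AB:

#include <linux/types.h>

static void rsmu_spi_page_example(void)
{
	u16 reg = 0x01AB;		/* virtual register address */
	u16 cm_page = reg & 0xFF80;	/* 0x0180, two-byte page write */
	u8 sabre_page = reg >> 7;	/* 0x03, one-byte page write */
	u8 offset = reg & 0x7F;		/* 0x2B, the offset address byte */

	(void)cm_page;
	(void)sabre_page;
	(void)offset;
}
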
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index 87f684cff9a1..51536691ad9d 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -2,39 +2,64 @@
/*
* Simple MFD - I2C
*
+ * Author(s):
+ * Michael Walle <michael@walle.cc>
+ * Lee Jones <lee.jones@linaro.org>
+ *
* This driver creates a single register map with the intention for it to be
* shared by all sub-devices. Children can use their parent's device structure
* (dev.parent) in order to reference it.
*
* Once the register map has been successfully initialised, any sub-devices
- * represented by child nodes in Device Tree will be subsequently registered.
+ * represented by child nodes in Device Tree or via the MFD cells in this file
+ * will be subsequently registered.
*/
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
-static const struct regmap_config simple_regmap_config = {
+#include "simple-mfd-i2c.h"
+
+static const struct regmap_config regmap_config_8r_8v = {
.reg_bits = 8,
.val_bits = 8,
};
static int simple_mfd_i2c_probe(struct i2c_client *i2c)
{
- const struct regmap_config *config;
+ const struct simple_mfd_data *simple_mfd_data;
+ const struct regmap_config *regmap_config;
struct regmap *regmap;
+ int ret;
+
+ simple_mfd_data = device_get_match_data(&i2c->dev);
- config = device_get_match_data(&i2c->dev);
- if (!config)
- config = &simple_regmap_config;
+ /* If no regmap_config is specified, use the default 8-bit reg/val config */
+ if (!simple_mfd_data || !simple_mfd_data->regmap_config)
+ regmap_config = &regmap_config_8r_8v;
+ else
+ regmap_config = simple_mfd_data->regmap_config;
- regmap = devm_regmap_init_i2c(i2c, config);
+ regmap = devm_regmap_init_i2c(i2c, regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- return devm_of_platform_populate(&i2c->dev);
+ /* If no MFD cells are specified, register the DT child nodes instead */
+ if (!simple_mfd_data || !simple_mfd_data->mfd_cell)
+ return devm_of_platform_populate(&i2c->dev);
+
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
+ simple_mfd_data->mfd_cell,
+ simple_mfd_data->mfd_cell_size,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to add child devices\n");
+
+ return ret;
}
static const struct of_device_id simple_mfd_i2c_of_match[] = {
diff --git a/drivers/mfd/simple-mfd-i2c.h b/drivers/mfd/simple-mfd-i2c.h
new file mode 100644
index 000000000000..7cb2bdd347d9
--- /dev/null
+++ b/drivers/mfd/simple-mfd-i2c.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Simple MFD - I2C
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This driver creates a single register map with the intention for it to be
+ * shared by all sub-devices. Children can use their parent's device structure
+ * (dev.parent) in order to reference it.
+ *
+ * Once the register map has been successfully initialised, any sub-devices
+ * represented by child nodes in Device Tree or via the MFD cells in the
+ * associated C file will be subsequently registered.
+ */
+
+#ifndef __MFD_SIMPLE_MFD_I2C_H
+#define __MFD_SIMPLE_MFD_I2C_H
+
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+
+struct simple_mfd_data {
+ const struct regmap_config *regmap_config;
+ const struct mfd_cell *mfd_cell;
+ size_t mfd_cell_size;
+};
+
+#endif /* __MFD_SIMPLE_MFD_I2C_H */
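
With the new simple_mfd_data match data, supporting another device only takes pairing a regmap_config with an optional MFD cell list. A hedged sketch of a hypothetical entry; all "example" names are assumptions, not part of the patch:

static const struct mfd_cell example_cells[] = {
	{ .name = "example-hwmon" },
	{ .name = "example-wdt" },
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
};

static const struct simple_mfd_data example_simple_mfd_data = {
	.regmap_config = &example_regmap_config,
	.mfd_cell = example_cells,
	.mfd_cell_size = ARRAY_SIZE(example_cells),
};

/* Wired up via:
 * { .compatible = "vendor,example", .data = &example_simple_mfd_data },
 */
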
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 1dd39483e7c1..58d09c615e67 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1095,7 +1095,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
if (variant->id_val == STMPE801_ID ||
variant->id_val == STMPE1600_ID) {
- int base = irq_create_mapping(stmpe->domain, 0);
+ int base = irq_find_mapping(stmpe->domain, 0);
handle_nested_irq(base);
return IRQ_HANDLED;
@@ -1123,7 +1123,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
while (status) {
int bit = __ffs(status);
int line = bank * 8 + bit;
- int nestedirq = irq_create_mapping(stmpe->domain, line);
+ int nestedirq = irq_find_mapping(stmpe->domain, line);
handle_nested_irq(nestedirq);
status &= ~(1 << bit);
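
This stmpe change, like the tc3589x, wm8994 and ab8500 hunks elsewhere in this section, replaces irq_create_mapping() with irq_find_mapping() in the interrupt handler: mappings should be created once at setup time, and the hot path should only look them up. A minimal sketch of the intended split (the function names are assumptions):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static void example_setup_mappings(struct irq_domain *domain, int nr_irqs)
{
	int hwirq;

	/* Creating a mapping may allocate; do it once, outside IRQ context. */
	for (hwirq = 0; hwirq < nr_irqs; hwirq++)
		irq_create_mapping(domain, hwirq);
}

static irqreturn_t example_handler(int irq, void *data)
{
	struct irq_domain *domain = data;

	/* Pure lookup: cheap, and safe to call from the handler. */
	handle_nested_irq(irq_find_mapping(domain, 0));
	return IRQ_HANDLED;
}
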
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 765c0210cb52..191fdb87c424 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -60,7 +60,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
goto err_map;
}
- base = ioremap(res.start, resource_size(&res));
+ base = of_iomap(np, 0);
if (!base) {
ret = -ENOMEM;
goto err_map;
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 7614f8fe0e91..13583cdb93b6 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -187,7 +187,7 @@ again:
while (status) {
int bit = __ffs(status);
- int virq = irq_create_mapping(tc3589x->domain, bit);
+ int virq = irq_find_mapping(tc3589x->domain, bit);
handle_nested_irq(virq);
status &= ~(1 << bit);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0e6e25308190..55adc379f94b 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -175,10 +175,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
tscadc->dev = &pdev->dev;
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "no irq ID is specified.\n");
+ if (err < 0)
goto ret;
- } else
+ else
tscadc->irq = err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 341466ef20cc..3bd5728844a0 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -24,6 +24,7 @@
static const struct mfd_cell tps65086_cells[] = {
{ .name = "tps65086-regulator", },
{ .name = "tps65086-gpio", },
+ { .name = "tps65086-reset", },
};
static const struct regmap_range tps65086_yes_ranges[] = {
@@ -100,29 +101,30 @@ static int tps65086_probe(struct i2c_client *client,
(char)((version & TPS65086_DEVICEID_OTP_MASK) >> 4) + 'A',
(version & TPS65086_DEVICEID_REV_MASK) >> 6);
- ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
- &tps65086_irq_chip, &tps->irq_data);
- if (ret) {
- dev_err(tps->dev, "Failed to register IRQ chip\n");
- return ret;
+ if (tps->irq > 0) {
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+ &tps65086_irq_chip, &tps->irq_data);
+ if (ret) {
+ dev_err(tps->dev, "Failed to register IRQ chip\n");
+ return ret;
+ }
}
ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65086_cells,
ARRAY_SIZE(tps65086_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
- if (ret) {
+ if (ret && tps->irq > 0)
regmap_del_irq_chip(tps->irq, tps->irq_data);
- return ret;
- }
- return 0;
+ return ret;
}
static int tps65086_remove(struct i2c_client *client)
{
struct tps65086 *tps = i2c_get_clientdata(client);
- regmap_del_irq_chip(tps->irq, tps->irq_data);
+ if (tps->irq > 0)
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
return 0;
}
diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index ddddf08b6a4c..7ae906ff8e35 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -35,7 +35,11 @@
#define TQMX86_REG_BOARD_ID_E39x 7
#define TQMX86_REG_BOARD_ID_70EB 8
#define TQMX86_REG_BOARD_ID_80UC 9
-#define TQMX86_REG_BOARD_ID_90UC 10
+#define TQMX86_REG_BOARD_ID_110EB 11
+#define TQMX86_REG_BOARD_ID_E40M 12
+#define TQMX86_REG_BOARD_ID_E40S 13
+#define TQMX86_REG_BOARD_ID_E40C1 14
+#define TQMX86_REG_BOARD_ID_E40C2 15
#define TQMX86_REG_BOARD_REV 0x21
#define TQMX86_REG_IO_EXT_INT 0x26
#define TQMX86_REG_IO_EXT_INT_NONE 0
@@ -77,7 +81,7 @@ static struct i2c_board_info tqmx86_i2c_devices[] = {
},
};
-static struct ocores_i2c_platform_data ocores_platfom_data = {
+static struct ocores_i2c_platform_data ocores_platform_data = {
.num_devices = ARRAY_SIZE(tqmx86_i2c_devices),
.devices = tqmx86_i2c_devices,
};
@@ -85,8 +89,8 @@ static struct ocores_i2c_platform_data ocores_platfom_data = {
static const struct mfd_cell tqmx86_i2c_soft_dev[] = {
{
.name = "ocores-i2c",
- .platform_data = &ocores_platfom_data,
- .pdata_size = sizeof(ocores_platfom_data),
+ .platform_data = &ocores_platform_data,
+ .pdata_size = sizeof(ocores_platform_data),
.resources = tqmx_i2c_soft_resources,
.num_resources = ARRAY_SIZE(tqmx_i2c_soft_resources),
},
@@ -128,21 +132,33 @@ static const char *tqmx86_board_id_to_name(u8 board_id)
return "TQMx70EB";
case TQMX86_REG_BOARD_ID_80UC:
return "TQMx80UC";
- case TQMX86_REG_BOARD_ID_90UC:
- return "TQMx90UC";
+ case TQMX86_REG_BOARD_ID_110EB:
+ return "TQMx110EB";
+ case TQMX86_REG_BOARD_ID_E40M:
+ return "TQMxE40M";
+ case TQMX86_REG_BOARD_ID_E40S:
+ return "TQMxE40S";
+ case TQMX86_REG_BOARD_ID_E40C1:
+ return "TQMxE40C1";
+ case TQMX86_REG_BOARD_ID_E40C2:
+ return "TQMxE40C2";
default:
return "Unknown";
}
}
-static int tqmx86_board_id_to_clk_rate(u8 board_id)
+static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
{
switch (board_id) {
case TQMX86_REG_BOARD_ID_50UC:
case TQMX86_REG_BOARD_ID_60EB:
case TQMX86_REG_BOARD_ID_70EB:
case TQMX86_REG_BOARD_ID_80UC:
- case TQMX86_REG_BOARD_ID_90UC:
+ case TQMX86_REG_BOARD_ID_110EB:
+ case TQMX86_REG_BOARD_ID_E40M:
+ case TQMX86_REG_BOARD_ID_E40S:
+ case TQMX86_REG_BOARD_ID_E40C1:
+ case TQMX86_REG_BOARD_ID_E40C2:
return 24000;
case TQMX86_REG_BOARD_ID_E39M:
case TQMX86_REG_BOARD_ID_E39C:
@@ -152,7 +168,9 @@ static int tqmx86_board_id_to_clk_rate(u8 board_id)
case TQMX86_REG_BOARD_ID_E38C:
return 33000;
default:
- return 0;
+ dev_warn(dev, "unknown board %d, assuming 24MHz LPC clock\n",
+ board_id);
+ return 24000;
}
}
@@ -209,9 +227,11 @@ static int tqmx86_probe(struct platform_device *pdev)
/* Assumes the IRQ resource is first. */
tqmx_gpio_resources[0].start = gpio_irq;
+ } else {
+ tqmx_gpio_resources[0].flags = 0;
}
- ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
+ ocores_platform_data.clock_khz = tqmx86_board_id_to_clk_rate(dev, board_id);
if (i2c_det == TQMX86_REG_I2C_DETECT_SOFT) {
err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
@@ -253,6 +273,14 @@ static const struct dmi_system_id tqmx86_dmi_table[] __initconst = {
},
.callback = tqmx86_create_platform_device,
},
+ {
+ .ident = "TQMX86",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TQ-Systems"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TQMx"),
+ },
+ .callback = tqmx86_create_platform_device,
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, tqmx86_dmi_table);
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 6c3a619e2628..651a028bc519 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
struct wm8994 *wm8994 = data;
while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
- handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
+ handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
return IRQ_HANDLED;
}
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 5d8b48288cf4..6ebe3c7001ff 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -10,4 +10,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/firmware_if.o
+ common/command_submission.o common/firmware_if.o \
+ common/state_dump.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 719168c980a4..8132a84698d5 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -314,8 +314,6 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
- if (rc < 0)
- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
spin_unlock(&mgr->cb_lock);
if (rc < 0) {
@@ -552,7 +550,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_private_data = cb;
- rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
+ rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
cb->bus_address, cb->size);
if (rc) {
spin_lock(&cb->lock);
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 80c60fb41bbc..7b0516cf808b 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -38,7 +38,11 @@ static void hl_sob_reset(struct kref *ref)
kref);
struct hl_device *hdev = hw_sob->hdev;
+ dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
+
hdev->asic_funcs->reset_sob(hdev, hw_sob);
+
+ hw_sob->need_reset = false;
}
void hl_sob_reset_error(struct kref *ref)
@@ -52,6 +56,24 @@ void hl_sob_reset_error(struct kref *ref)
hw_sob->q_idx, hw_sob->sob_id);
}
+void hw_sob_put(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset);
+}
+
+static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+}
+
+void hw_sob_get(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_get(&hw_sob->kref);
+}
+
/**
* hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
* @sob_base: sob base id
@@ -84,76 +106,29 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
return 0;
}
-static void sob_reset_work(struct work_struct *work)
-{
- struct hl_cs_compl *hl_cs_cmpl =
- container_of(work, struct hl_cs_compl, sob_reset_work);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
-
- /*
- * A signal CS can get completion while the corresponding wait
- * for signal CS is on its way to the PQ. The wait for signal CS
- * will get stuck if the signal CS incremented the SOB to its
- * max value and there are no pending (submitted) waits on this
- * SOB.
- * We do the following to void this situation:
- * 1. The wait for signal CS must get a ref for the signal CS as
- * soon as possible in cs_ioctl_signal_wait() and put it
- * before being submitted to the PQ but after it incremented
- * the SOB refcnt in init_signal_wait_cs().
- * 2. Signal/Wait for signal CS will decrement the SOB refcnt
- * here.
- * These two measures guarantee that the wait for signal CS will
- * reset the SOB upon completion rather than the signal CS and
- * hence the above scenario is avoided.
- */
- kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
-
- if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->reset_sob_group(hdev,
- hl_cs_cmpl->sob_group);
-
- kfree(hl_cs_cmpl);
-}
-
static void hl_fence_release(struct kref *kref)
{
struct hl_fence *fence =
container_of(kref, struct hl_fence, refcount);
struct hl_cs_compl *hl_cs_cmpl =
container_of(fence, struct hl_cs_compl, base_fence);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
- /* EBUSY means the CS was never submitted and hence we don't have
- * an attached hw_sob object that we should handle here
- */
- if (fence->error == -EBUSY)
- goto free;
-
- if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
- (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
- (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
-
- dev_dbg(hdev->dev,
- "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
- hl_cs_cmpl->cs_seq,
- hl_cs_cmpl->type,
- hl_cs_cmpl->hw_sob->sob_id,
- hl_cs_cmpl->sob_val);
-
- queue_work(hdev->sob_reset_wq, &hl_cs_cmpl->sob_reset_work);
-
- return;
- }
-
-free:
kfree(hl_cs_cmpl);
}
void hl_fence_put(struct hl_fence *fence)
{
- if (fence)
- kref_put(&fence->refcount, hl_fence_release);
+ if (IS_ERR_OR_NULL(fence))
+ return;
+ kref_put(&fence->refcount, hl_fence_release);
+}
+
+void hl_fences_put(struct hl_fence **fence, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, fence++)
+ hl_fence_put(*fence);
}
void hl_fence_get(struct hl_fence *fence)
@@ -473,11 +448,139 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
spin_unlock(&hdev->cs_mirror_lock);
}
+/*
+ * force_complete_multi_cs - complete all contexts that wait on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+static void force_complete_multi_cs(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+
+ spin_lock(&mcs_compl->lock);
+
+ if (!mcs_compl->used) {
+ spin_unlock(&mcs_compl->lock);
+ continue;
+ }
+
+ /* When calling force complete, no context should be waiting on
+ * multi-CS.
+ * We call the function as a protection for such a case, to free
+ * any pending context and print an error message.
+ */
+ dev_err(hdev->dev,
+ "multi-CS completion context %d still waiting when calling force completion\n",
+ i);
+ complete_all(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ }
+}
+
+/*
+ * complete_multi_cs - complete all waiting entities on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @cs: CS structure
+ * The function signals a waiting entity whose stream masters overlap
+ * with the completed CS.
+ * For example:
+ * - a completed CS worked on stream master QID 4, multi-CS completion
+ * is actively waiting on stream master QIDs 3, 5. Don't send a signal,
+ * as there is no common stream master QID
+ * - a completed CS worked on stream master QID 4, multi-CS completion
+ * is actively waiting on stream master QIDs 3, 4. Send a signal, as
+ * stream master QID 4 is common
+ */
+static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_fence *fence = cs->fence;
+ int i;
+
+ /* in case of a staged CS, check for completion only for the first CS */
+ if (cs->staged_cs && !cs->staged_first)
+ return;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+ if (!mcs_compl->used)
+ continue;
+
+ spin_lock(&mcs_compl->lock);
+
+ /*
+ * complete if:
+ * 1. still waiting for completion
+ * 2. the completed CS has at least one overlapping stream
+ * master with the stream masters in the completion
+ */
+ if (mcs_compl->used &&
+ (fence->stream_master_qid_map &
+ mcs_compl->stream_master_qid_map)) {
+ /* extract the timestamp only of first completed CS */
+ if (!mcs_compl->timestamp)
+ mcs_compl->timestamp =
+ ktime_to_ns(fence->timestamp);
+ complete_all(&mcs_compl->completion);
+ }
+
+ spin_unlock(&mcs_compl->lock);
+ }
+}
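
The completion test in complete_multi_cs() above reduces to a bitwise AND of two per-context QID bitmaps. A hedged illustration with made-up values; qid_maps_overlap() is not part of the driver:

#include <linux/bits.h>
#include <linux/types.h>

/* One bit per stream master QID; true means the waiter must be signalled. */
static bool qid_maps_overlap(void)
{
	u32 completed_cs_map = BIT(4);		/* completed CS used QID 4 */
	u32 waiting_ctx_map = BIT(3) | BIT(4);	/* waiter watches QIDs 3, 4 */

	return completed_cs_map & waiting_ctx_map;	/* nonzero -> signal */
}
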
+
+static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
+ struct hl_cs *cs,
+ struct hl_cs_compl *hl_cs_cmpl)
+{
+ /* Skip this handler if the cs wasn't submitted, to avoid putting
+ * the hw_sob twice, since this case is already handled at this point;
+ * also skip if the hw_sob pointer wasn't set.
+ */
+ if (!hl_cs_cmpl->hw_sob || !cs->submitted)
+ return;
+
+ spin_lock(&hl_cs_cmpl->lock);
+
+ /*
+ * We take a refcount on the hw_sob object upon reservation of signals
+ * or a signal/wait CS, and need to put it when the first staged CS
+ * (which contains the encaps signals) or the signal/wait CS completes.
+ */
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
+ (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
+ (!!hl_cs_cmpl->encaps_signals)) {
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ hw_sob_put(hl_cs_cmpl->hw_sob);
+
+ if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+ hdev->asic_funcs->reset_sob_group(hdev,
+ hl_cs_cmpl->sob_group);
+ }
+
+ spin_unlock(&hl_cs_cmpl->lock);
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(cs->fence, struct hl_cs_compl, base_fence);
cs->completed = true;
@@ -493,8 +596,9 @@ static void cs_do_release(struct kref *ref)
complete_job(hdev, job);
if (!cs->submitted) {
- /* In case the wait for signal CS was submitted, the put occurs
- * in init_signal_wait_cs() or collective_wait_init_cs()
+ /*
+ * In case the wait for signal CS was submitted, the fence put
+ * occurs in init_signal_wait_cs() or collective_wait_init_cs()
* right before hanging on the PQ.
*/
if (cs->type == CS_TYPE_WAIT ||
@@ -535,8 +639,20 @@ static void cs_do_release(struct kref *ref)
list_del(&cs->staged_cs_node);
spin_unlock(&hdev->cs_mirror_lock);
}
+
+		/* put the handle refcount when the first staged CS
+		 * (the one that contains the encaps signals) is completed.
+		 */
+ if (hl_cs_cmpl->encaps_signals)
+ kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
}
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ kref_put(&cs->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
+
out:
/* Must be called before hl_ctx_put because inside we use ctx to get
* the device
@@ -566,6 +682,10 @@ out:
if (cs->timestamp)
cs->fence->timestamp = ktime_get();
complete_all(&cs->fence->completion);
+ complete_multi_cs(hdev, cs);
+
+ cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
+
hl_fence_put(cs->fence);
kfree(cs->jobs_in_queue_cnt);
@@ -621,6 +741,10 @@ static void cs_timedout(struct work_struct *work)
break;
}
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev, "Error during system state dump %d\n", rc);
+
cs_put(cs);
if (likely(!skip_reset_on_timeout)) {
@@ -661,6 +785,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->completed = false;
cs->type = cs_type;
cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
+ cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
hdev->skip_reset_on_timeout ||
@@ -671,9 +796,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl)
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_KERNEL);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
if (!cs_cmpl) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
@@ -698,7 +823,6 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl->hdev = hdev;
cs_cmpl->type = cs->type;
spin_lock_init(&cs_cmpl->lock);
- INIT_WORK(&cs_cmpl->sob_reset_work, sob_reset_work);
cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
@@ -791,31 +915,22 @@ void hl_cs_rollback_all(struct hl_device *hdev)
cs_rollback(hdev, cs);
cs_put(cs);
}
-}
-
-void hl_pending_cb_list_flush(struct hl_ctx *ctx)
-{
- struct hl_pending_cb *pending_cb, *tmp;
- list_for_each_entry_safe(pending_cb, tmp,
- &ctx->pending_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- hl_cb_put(pending_cb->cb);
- kfree(pending_cb);
- }
+ force_complete_multi_cs(hdev);
}
static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend;
+ unsigned long flags;
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
pend->fence.error = -EIO;
complete_all(&pend->fence.completion);
}
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
@@ -981,6 +1096,10 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_TYPE_WAIT;
else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
return CS_TYPE_COLLECTIVE_WAIT;
+ else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
+ return CS_RESERVE_SIGNALS;
+ else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
+ return CS_UNRESERVE_SIGNALS;
else
return CS_TYPE_DEFAULT;
}
@@ -1081,7 +1200,8 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
}
static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
- u64 sequence, u32 flags)
+ u64 sequence, u32 flags,
+ u32 encaps_signal_handle)
{
if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
return 0;
@@ -1093,6 +1213,9 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
/* Staged CS sequence is the first CS sequence */
INIT_LIST_HEAD(&cs->staged_cs_node);
cs->staged_sequence = cs->sequence;
+
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl_id = encaps_signal_handle;
} else {
/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
* under the cs_mirror_lock
@@ -1108,9 +1231,20 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
return 0;
}
+static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
+{
+ int i;
+
+ for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
+ if (qid == hdev->stream_master_qid_arr[i])
+ return BIT(i);
+
+ return 0;
+}
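
[Editor's note: get_stream_master_qid_mask() maps a queue ID to a bit according to its position in the device's stream-master array. A userspace model with a made-up array — the real values come from hdev->stream_master_qid_arr:]

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stream-master QIDs */
    static const uint32_t stream_master_qid_arr[] = { 2, 6, 9, 13 };
    #define ARR_LEN \
    	(sizeof(stream_master_qid_arr) / sizeof(stream_master_qid_arr[0]))

    static uint32_t qid_mask(uint32_t qid)
    {
    	for (uint32_t i = 0; i < ARR_LEN; i++)
    		if (qid == stream_master_qid_arr[i])
    			return 1u << i; /* bit position == array position */
    	return 0; /* not a stream master QID */
    }

    int main(void)
    {
    	printf("mask for QID 9:  0x%x\n", qid_mask(9));  /* 0x4 */
    	printf("mask for QID 42: 0x%x\n", qid_mask(42)); /* 0x0 */
    	return 0;
    }
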
+
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
- u32 timeout)
+ u32 encaps_signals_handle, u32 timeout)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1121,6 +1255,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs *cs;
struct hl_cb *cb;
u64 user_sequence;
+ u8 stream_master_qid_map = 0;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
@@ -1148,7 +1283,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
hl_debugfs_add_cs(cs);
- rc = cs_staged_submission(hdev, cs, user_sequence, flags);
+ rc = cs_staged_submission(hdev, cs, user_sequence, flags,
+ encaps_signals_handle);
if (rc)
goto free_cs_object;
@@ -1179,9 +1315,20 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ if (queue_type == QUEUE_TYPE_EXT ||
+ queue_type == QUEUE_TYPE_HW) {
int_queues_only = false;
+ /*
+			 * store which streams are being used by the
+			 * external/HW queues of this CS
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ stream_master_qid_map |=
+ get_stream_master_qid_mask(hdev,
+ chunk->queue_index);
+ }
+
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
@@ -1242,6 +1389,13 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ /*
+ * store the (external/HW queues) streams used by the CS in the
+ * fence object for multi-CS completion
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ cs->fence->stream_master_qid_map = stream_master_qid_map;
+
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
if (rc != -EAGAIN)
@@ -1270,130 +1424,6 @@ out:
return rc;
}
-static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
-{
- struct hw_queue_properties *hw_queue_prop;
- struct hl_cs_counters_atomic *cntr;
- struct hl_cs_job *job;
-
- hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
- cntr = &hdev->aggregated_cs_counters;
-
- job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
- if (!job) {
- atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
- atomic64_inc(&cntr->out_of_mem_drop_cnt);
- dev_err(hdev->dev, "Failed to allocate a new job\n");
- return -ENOMEM;
- }
-
- job->id = 0;
- job->cs = cs;
- job->user_cb = cb;
- atomic_inc(&job->user_cb->cs_cnt);
- job->user_cb_size = size;
- job->hw_queue_id = hw_queue_id;
- job->patched_cb = job->user_cb;
- job->job_cb_size = job->user_cb_size;
-
- /* increment refcount as for external queues we get completion */
- cs_get(cs);
-
- cs->jobs_in_queue_cnt[job->hw_queue_id]++;
-
- list_add_tail(&job->cs_node, &cs->job_list);
-
- hl_debugfs_add_job(hdev, job);
-
- return 0;
-}
-
-static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
-{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ctx *ctx = hpriv->ctx;
- struct hl_pending_cb *pending_cb, *tmp;
- struct list_head local_cb_list;
- struct hl_cs *cs;
- struct hl_cb *cb;
- u32 hw_queue_id;
- u32 cb_size;
- int process_list, rc = 0;
-
- if (list_empty(&ctx->pending_cb_list))
- return 0;
-
- process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);
-
- /* Only a single thread is allowed to process the list */
- if (!process_list)
- return 0;
-
- if (list_empty(&ctx->pending_cb_list))
- goto free_pending_cb_token;
-
- /* move all list elements to a local list */
- INIT_LIST_HEAD(&local_cb_list);
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
- cb_node)
- list_move_tail(&pending_cb->cb_node, &local_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs, 0,
- hdev->timeout_jiffies);
- if (rc)
- goto add_list_elements;
-
- hl_debugfs_add_cs(cs);
-
- /* Iterate through pending cb list, create jobs and add to CS */
- list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
- cb = pending_cb->cb;
- cb_size = pending_cb->cb_size;
- hw_queue_id = pending_cb->hw_queue_id;
-
- rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
- hw_queue_id);
- if (rc)
- goto free_cs_object;
- }
-
- rc = hl_hw_queue_schedule_cs(cs);
- if (rc) {
- if (rc != -EAGAIN)
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu (%d)\n",
- ctx->asid, cs->sequence, rc);
- goto free_cs_object;
- }
-
- /* pending cb was scheduled successfully */
- list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- kfree(pending_cb);
- }
-
- cs_put(cs);
-
- goto free_pending_cb_token;
-
-free_cs_object:
- cs_rollback(hdev, cs);
- cs_put(cs);
-add_list_elements:
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
- cb_node)
- list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-free_pending_cb_token:
- atomic_set(&ctx->thread_pending_cb_token, 1);
-
- return rc;
-}
-
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
@@ -1443,7 +1473,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0, hdev->timeout_jiffies);
+ cs_seq, 0, 0, hdev->timeout_jiffies);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1501,10 +1531,17 @@ out:
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
* if the SOB value reaches the max value move to the other SOB reserved
* to the queue.
+ * @hdev: pointer to device structure
+ * @q_idx: stream queue index
+ * @hw_sob: the H/W SOB used in this signal CS.
+ * @count: signals count
+ * @encaps_sig: true if this is a reservation for encaps signals.
+ *
* Note that this function must be called while hw_queues_lock is taken.
*/
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count)
+		struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *sob = *hw_sob, *other_sob;
@@ -1512,7 +1549,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
- kref_get(&sob->kref);
+ hw_sob_get(sob);
/* check for wraparound */
if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
@@ -1522,7 +1559,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
* just incremented the refcount right before calling this
* function.
*/
- kref_put(&sob->kref, hl_sob_reset_error);
+ hw_sob_put_err(sob);
/*
* check the other sob value, if it still in use then fail
@@ -1537,12 +1574,42 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
return -EINVAL;
}
- prop->next_sob_val = 1;
+ /*
+		 * next_sob_val always points to the next available signal
+		 * in the SOB, so for encaps signals it is the one right
+		 * after the reserved amount.
+ */
+ if (encaps_sig)
+ prop->next_sob_val = count + 1;
+ else
+ prop->next_sob_val = count;
/* only two SOBs are currently in use */
prop->curr_sob_offset = other_sob_offset;
*hw_sob = other_sob;
+ /*
+		 * Check if other_sob needs a reset, and do it before using it
+		 * for the reservation or the next signal CS.
+		 * We do it here, for both the encaps and regular signal CS
+		 * cases, in order to avoid a possible race of two kref_put
+		 * calls on the SOB occurring at the same time, which could
+		 * happen if we moved the SOB reset (kref_put) to cs_do_release().
+		 * In addition, with a combination of signal CS and encaps
+		 * reservations, by the time the SOB needs a reset there may be
+		 * no more reservations and only signal CSs keep coming; in that
+		 * case the signal CS must put the refcount and reset the SOB.
+ */
+ if (other_sob->need_reset)
+ hw_sob_put(other_sob);
+
+ if (encaps_sig) {
+ /* set reset indication for the sob */
+ sob->need_reset = true;
+ hw_sob_get(other_sob);
+ }
+
dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
prop->curr_sob_offset, q_idx);
} else {
@@ -1553,12 +1620,18 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
}
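
[Editor's note: a small userspace model of the next_sob_val bookkeeping above. HL_MAX_SOB_VAL is assumed here to be 1 << 15 (a 15-bit SOB counter); on wraparound the stream switches to the other SOB and the counter restarts at count for a regular signal CS, or count + 1 for an encaps reservation.]

    #include <stdio.h>

    #define HL_MAX_SOB_VAL (1 << 15) /* assumed 15-bit SOB counter limit */

    /* Returns the new next_sob_val after submitting/reserving 'count'
     * signals: on wraparound the stream switches SOB and restarts the
     * counter (count + 1 for an encaps reservation, count otherwise). */
    static unsigned int advance_sob(unsigned int next_sob_val,
    				unsigned int count, int encaps_sig)
    {
    	if (next_sob_val + count >= HL_MAX_SOB_VAL) /* switch SOB */
    		return encaps_sig ? count + 1 : count;
    	return next_sob_val + count;
    }

    int main(void)
    {
    	printf("%u\n", advance_sob(100, 8, 0));                /* 108 */
    	printf("%u\n", advance_sob(HL_MAX_SOB_VAL - 4, 8, 1)); /* 9 */
    	printf("%u\n", advance_sob(HL_MAX_SOB_VAL - 4, 8, 0)); /* 8 */
    	return 0;
    }
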
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
+ bool encaps_signals)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
int rc = 0;
+ if (encaps_signals) {
+ *signal_seq = chunk->encaps_signal_seq;
+ return 0;
+ }
+
signal_seq_arr_len = chunk->num_signal_seq_arr;
/* currently only one signal seq is supported */
@@ -1583,7 +1656,7 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
return -ENOMEM;
}
- size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
+ size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
@@ -1605,8 +1678,8 @@ out:
}
static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
- u32 q_idx)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
struct hl_cs_counters_atomic *cntr;
struct hl_cs_job *job;
@@ -1644,6 +1717,9 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = q_idx;
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need the CB in
@@ -1666,11 +1742,196 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
return 0;
}
+static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
+ u32 q_idx, u32 count,
+ u32 *handle_id, u32 *sob_addr,
+ u32 *signals_count)
+{
+ struct hw_queue_properties *hw_queue_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_encaps_sig_handle *handle;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ int hdl_id;
+ int rc = 0;
+
+ if (count >= HL_MAX_SOB_VAL) {
+ dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
+ count);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (q_idx >= hdev->asic_prop.max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+
+ if (!hw_queue_prop->supports_sync_stream) {
+ dev_err(hdev->dev,
+ "Queue index %d does not support sync stream operations\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ handle->count = count;
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mgr->lock);
+
+ if (hdl_id < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ handle->id = hdl_id;
+ handle->q_idx = q_idx;
+ handle->hdev = hdev;
+ kref_init(&handle->refcount);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+
+ /*
+	 * Increment the SOB value by the user-requested count to reserve
+	 * those signals.
+	 * If the amount of signals to reserve would exceed the max SOB
+	 * value, switch to the other SOB.
+ */
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
+ true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to switch SOB\n");
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto remove_idr;
+ }
+	/* Set the hw_sob in the handle only after calling the SOB
+	 * wraparound handler, since the SOB could have changed.
+	 */
+ handle->hw_sob = hw_sob;
+
+ /* store the current sob value for unreserve validity check, and
+ * signal offset support
+ */
+ handle->pre_sob_val = prop->next_sob_val - handle->count;
+
+ *signals_count = prop->next_sob_val;
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ *sob_addr = handle->hw_sob->sob_addr;
+ *handle_id = hdl_id;
+
+ dev_dbg(hdev->dev,
+ "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
+ hw_sob->sob_id, handle->hw_sob->sob_addr,
+ prop->next_sob_val - 1, q_idx, hdl_id);
+ goto out;
+
+remove_idr:
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, hdl_id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+out:
+ return rc;
+}
+
+static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
+{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ u32 q_idx, sob_addr;
+ int rc = 0;
+
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
+ if (encaps_sig_hdl) {
+ dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
+ handle_id, encaps_sig_hdl->hw_sob->sob_addr,
+ encaps_sig_hdl->count);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ q_idx = encaps_sig_hdl->q_idx;
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+ sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
+
+ /* Check if sob_val got out of sync due to other
+ * signal submission requests which were handled
+ * between the reserve-unreserve calls or SOB switch
+ * upon reaching SOB max value.
+ */
+ if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
+ != prop->next_sob_val ||
+ sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
+ dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
+ encaps_sig_hdl->pre_sob_val,
+ (prop->next_sob_val - encaps_sig_hdl->count));
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Decrement the SOB value by count by user request
+ * to unreserve those signals
+ */
+ prop->next_sob_val -= encaps_sig_hdl->count;
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ hw_sob_put(hw_sob);
+
+ /* Release the id and free allocated memory of the handle */
+ idr_remove(&mgr->handles, handle_id);
+ kfree(encaps_sig_hdl);
+ } else {
+ rc = -EINVAL;
+ dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
+ }
+out:
+ spin_unlock(&mgr->lock);
+
+ return rc;
+}
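
[Editor's note: the validity check in cs_ioctl_unreserve_signals() relies on the pre_sob_val saved at reservation time. A userspace model of that bookkeeping, with made-up values:]

    #include <stdio.h>

    struct reservation {
    	unsigned int pre_sob_val; /* SOB value before the reservation */
    	unsigned int count;       /* number of reserved signals */
    };

    /* Unreserve is valid only while the SOB counter still sits exactly at
     * pre_sob_val + count, i.e. nothing was signaled or reserved since. */
    static int can_unreserve(const struct reservation *r,
    			 unsigned int next_sob_val)
    {
    	return r->pre_sob_val + r->count == next_sob_val;
    }

    int main(void)
    {
    	unsigned int next_sob_val = 10;
    	struct reservation r = { .pre_sob_val = 10, .count = 4 };

    	next_sob_val += r.count; /* the reservation itself */
    	printf("unreserve ok: %d\n", can_unreserve(&r, next_sob_val)); /* 1 */

    	next_sob_val += 2; /* another submission signaled in between */
    	printf("unreserve ok: %d\n", can_unreserve(&r, next_sob_val)); /* 0 */
    	return 0;
    }
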
+
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
u64 *cs_seq, u32 flags, u32 timeout)
{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
+ bool handle_found = false, is_wait_cs = false,
+ wait_cs_submitted = false,
+ cs_encaps_signals = false;
struct hl_cs_chunk *cs_chunk_array, *chunk;
+ bool staged_cs_with_encaps_signals = false;
struct hw_queue_properties *hw_queue_prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
@@ -1730,11 +1991,58 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
collective_engine_id = chunk->collective_engine_id;
}
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
+ is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT);
+
+ cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
+
+ if (is_wait_cs) {
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
+ ctx, cs_encaps_signals);
if (rc)
goto free_cs_chunk_array;
+ if (cs_encaps_signals) {
+ /* check if cs sequence has encapsulated
+ * signals handle
+ */
+ struct idr *idp;
+ u32 id;
+
+ spin_lock(&ctx->sig_mgr.lock);
+ idp = &ctx->sig_mgr.handles;
+ idr_for_each_entry(idp, encaps_sig_hdl, id) {
+ if (encaps_sig_hdl->cs_seq == signal_seq) {
+ handle_found = true;
+					/* get a refcount to protect the
+					 * handle from being removed from the
+					 * IDR; needed when multiple wait CSs
+					 * use offsets to wait on reserved
+					 * encaps signals.
+					 */
+ kref_get(&encaps_sig_hdl->refcount);
+ break;
+ }
+ }
+ spin_unlock(&ctx->sig_mgr.lock);
+
+ if (!handle_found) {
+ dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ signal_seq);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ /* validate also the signal offset value */
+ if (chunk->encaps_signal_offset >
+ encaps_sig_hdl->count) {
+ dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
+ chunk->encaps_signal_offset,
+ encaps_sig_hdl->count);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+ }
+
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
@@ -1755,11 +2063,16 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
sig_waitcs_cmpl =
container_of(sig_fence, struct hl_cs_compl, base_fence);
- if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ staged_cs_with_encaps_signals = !!
+ (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
+ (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
+
+ if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
+ !staged_cs_with_encaps_signals) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
- "CS seq 0x%llx is not of a signal CS\n",
+ "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
signal_seq);
hl_fence_put(sig_fence);
rc = -EINVAL;
@@ -1776,18 +2089,27 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
if (rc) {
- if (cs_type == CS_TYPE_WAIT ||
- cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs)
hl_fence_put(sig_fence);
+
goto free_cs_chunk_array;
}
/*
* Save the signal CS fence for later initialization right before
* hanging the wait CS on the queue.
+	 * For the encaps signals case, we save the CS sequence and handle
+	 * pointer for later initialization.
*/
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs) {
cs->signal_fence = sig_fence;
+		/* Store the handle pointer, so we don't have to look it up
+		 * again later in the flow, when we need to set the SOB info
+		 * in the hw_queue.
+		 */
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl = encaps_sig_hdl;
+ }
hl_debugfs_add_cs(cs);
@@ -1795,10 +2117,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
- q_idx);
+ q_idx, chunk->encaps_signal_offset);
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
- cs, q_idx, collective_engine_id);
+ cs, q_idx, collective_engine_id,
+ chunk->encaps_signal_offset);
else {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
@@ -1810,7 +2133,13 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- if (rc != -EAGAIN)
+		/* In case the wait CS failed here, it means the signal CS
+		 * already completed. We want to free all its related objects,
+		 * but we don't want to fail the ioctl.
+		 */
+ if (is_wait_cs)
+ rc = 0;
+ else if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to submit CS %d.%llu to H/W queues, error %d\n",
ctx->asid, cs->sequence, rc);
@@ -1818,6 +2147,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
rc = HL_CS_STATUS_SUCCESS;
+ if (is_wait_cs)
+ wait_cs_submitted = true;
goto put_cs;
free_cs_object:
@@ -1828,6 +2159,10 @@ put_cs:
/* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
+ if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
+ is_wait_cs)
+ kref_put(&encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
kfree(cs_chunk_array);
out:
return rc;
@@ -1836,10 +2171,11 @@ out:
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
- enum hl_cs_type cs_type;
+ enum hl_cs_type cs_type = 0;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks, flags, timeout;
+ u32 num_chunks, flags, timeout,
+ signals_count = 0, sob_addr = 0, handle_id = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1850,10 +2186,6 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
if (rc)
goto out;
- rc = hl_submit_pending_cb(hpriv);
- if (rc)
- goto out;
-
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
@@ -1876,80 +2208,448 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
&cs_seq, args->in.cs_flags, timeout);
break;
+ case CS_RESERVE_SIGNALS:
+ rc = cs_ioctl_reserve_signals(hpriv,
+ args->in.encaps_signals_q_idx,
+ args->in.encaps_signals_count,
+ &handle_id, &sob_addr, &signals_count);
+ break;
+ case CS_UNRESERVE_SIGNALS:
+ rc = cs_ioctl_unreserve_signals(hpriv,
+ args->in.encaps_sig_handle_id);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags, timeout);
+ args->in.cs_flags,
+ args->in.encaps_sig_handle_id,
+ timeout);
break;
}
-
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
+
+ if (cs_type == CS_RESERVE_SIGNALS) {
+ args->out.handle_id = handle_id;
+ args->out.sob_base_addr_offset = sob_addr;
+ args->out.count = signals_count;
+ } else {
+ args->out.seq = cs_seq;
+ }
args->out.status = rc;
- args->out.seq = cs_seq;
}
return rc;
}
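
[Editor's note: for illustration only, a hedged userspace sketch of how the new CS_RESERVE_SIGNALS path might be driven, assuming the uAPI fields added in this patch (HL_CS_FLAGS_RESERVE_SIGNALS_ONLY, encaps_signals_q_idx/encaps_signals_count in; handle_id/sob_base_addr_offset/count out). The device path and queue index are placeholders.]

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>

    int main(void)
    {
    	union hl_cs_args args;
    	int fd = open("/dev/hl0", O_RDWR); /* placeholder device node */

    	if (fd < 0)
    		return 1;

    	memset(&args, 0, sizeof(args));
    	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
    	args.in.encaps_signals_q_idx = 0;  /* placeholder stream queue */
    	args.in.encaps_signals_count = 16; /* reserve 16 signals */

    	if (ioctl(fd, HL_IOCTL_CS, &args))
    		return 1;

    	printf("handle %u, SOB offset 0x%x, signals count %u\n",
    	       args.out.handle_id, args.out.sob_base_addr_offset,
    	       args.out.count);
    	return 0;
    }
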
+static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
+ enum hl_cs_wait_status *status, u64 timeout_us,
+ s64 *timestamp)
+{
+ struct hl_device *hdev = ctx->hdev;
+ long completion_rc;
+ int rc = 0;
+
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ return rc;
+ }
+
+ if (!fence) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+
+ *status = CS_WAIT_STATUS_GONE;
+ return 0;
+ }
+
+ if (!timeout_us) {
+ completion_rc = completion_done(&fence->completion);
+ } else {
+ unsigned long timeout;
+
+ timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
+ timeout_us : usecs_to_jiffies(timeout_us);
+ completion_rc =
+ wait_for_completion_interruptible_timeout(
+ &fence->completion, timeout);
+ }
+
+ if (completion_rc > 0) {
+ *status = CS_WAIT_STATUS_COMPLETED;
+ if (timestamp)
+ *timestamp = ktime_to_ns(fence->timestamp);
+ } else {
+ *status = CS_WAIT_STATUS_BUSY;
+ }
+
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+
+ return rc;
+}
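
[Editor's note: hl_wait_for_fence() folds the tri-state return of wait_for_completion_interruptible_timeout() into a wait status. A tiny model of that decoding, with -512 standing in for -ERESTARTSYS:]

    #include <stdio.h>

    /* > 0: completed (remaining jiffies), 0: timed out, < 0: interrupted */
    static const char *decode_wait(long completion_rc)
    {
    	if (completion_rc > 0)
    		return "COMPLETED";
    	if (completion_rc == 0)
    		return "BUSY";
    	return "INTERRUPTED";
    }

    int main(void)
    {
    	printf("%s %s %s\n",
    	       decode_wait(42), decode_wait(0), decode_wait(-512));
    	return 0;
    }
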
+
+/*
+ * hl_cs_poll_fences - iterate CS fences to check for CS completion
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ *
+ * The function iterates over all CS sequences in the list and sets a bit
+ * in completion_bitmap for each completed CS.
+ * While iterating, the function also extracts the stream map, to be used
+ * later by the waiting function.
+ * This function shall be called after taking the context ref.
+ */
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+{
+ struct hl_fence **fence_ptr = mcs_data->fence_arr;
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ int i, rc, arr_len = mcs_data->arr_len;
+ u64 *seq_arr = mcs_data->seq_arr;
+ ktime_t max_ktime, first_cs_time;
+ enum hl_cs_wait_status status;
+
+ memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
+
+ /* get all fences under the same lock */
+ rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
+ if (rc)
+ return rc;
+
+ /*
+	 * set to the maximum time to verify the timestamp is valid: if this
+	 * value is unchanged at the end, no timestamp was updated
+ */
+ max_ktime = ktime_set(KTIME_SEC_MAX, 0);
+ first_cs_time = max_ktime;
+
+ for (i = 0; i < arr_len; i++, fence_ptr++) {
+ struct hl_fence *fence = *fence_ptr;
+
+ /*
+ * function won't sleep as it is called with timeout 0 (i.e.
+ * poll the fence)
+ */
+ rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
+ &status, 0, NULL);
+ if (rc) {
+ dev_err(hdev->dev,
+ "wait_for_fence error :%d for CS seq %llu\n",
+ rc, seq_arr[i]);
+ break;
+ }
+
+ mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
+
+ if (status == CS_WAIT_STATUS_BUSY)
+ continue;
+
+ mcs_data->completion_bitmap |= BIT(i);
+
+ /*
+		 * best effort to extract the timestamp. A few notes:
+		 * - if even a single fence is gone, we cannot extract the
+		 *   timestamp (as the fence no longer exists)
+		 * - for all completed CSs we take the earliest timestamp.
+		 *   For this we have to validate that:
+		 *   1. the given timestamp was indeed set
+		 *   2. the timestamp is the earliest of all timestamps so far
+ */
+
+ if (status == CS_WAIT_STATUS_GONE) {
+ mcs_data->update_ts = false;
+ mcs_data->gone_cs = true;
+ } else if (mcs_data->update_ts &&
+ (ktime_compare(fence->timestamp,
+ ktime_set(0, 0)) > 0) &&
+ (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
+ first_cs_time = fence->timestamp;
+ }
+ }
+
+ hl_fences_put(mcs_data->fence_arr, arr_len);
+
+ if (mcs_data->update_ts &&
+ (ktime_compare(first_cs_time, max_ktime) != 0))
+ mcs_data->timestamp = ktime_to_ns(first_cs_time);
+
+ return rc;
+}
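
[Editor's note: the timestamp extraction reduces to "earliest non-zero value wins", seeded with a maximum sentinel (the kernel code uses KTIME_SEC_MAX). A userspace model with made-up nanosecond values:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* made-up fence timestamps in ns; 0 means "not set" */
    	int64_t ts[] = { 0, 5000, 3000, 0, 7000 };
    	int64_t first_cs_time = INT64_MAX; /* sentinel, like KTIME_SEC_MAX */

    	for (unsigned int i = 0; i < sizeof(ts) / sizeof(ts[0]); i++)
    		if (ts[i] > 0 && ts[i] < first_cs_time)
    			first_cs_time = ts[i];

    	if (first_cs_time != INT64_MAX) /* at least one timestamp was set */
    		printf("earliest CS timestamp: %lld ns\n",
    		       (long long)first_cs_time);
    	return 0;
    }
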
+
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
- unsigned long timeout;
int rc = 0;
- long completion_rc;
if (timestamp)
*timestamp = 0;
- if (timeout_us == MAX_SCHEDULE_TIMEOUT)
- timeout = timeout_us;
- else
- timeout = usecs_to_jiffies(timeout_us);
-
hl_ctx_get(hdev, ctx);
fence = hl_ctx_get_fence(ctx, seq);
- if (IS_ERR(fence)) {
- rc = PTR_ERR(fence);
- if (rc == -EINVAL)
- dev_notice_ratelimited(hdev->dev,
- "Can't wait on CS %llu because current CS is at seq %llu\n",
- seq, ctx->cs_sequence);
- } else if (fence) {
- if (!timeout_us)
- completion_rc = completion_done(&fence->completion);
- else
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &fence->completion, timeout);
- if (completion_rc > 0) {
- *status = CS_WAIT_STATUS_COMPLETED;
- if (timestamp)
- *timestamp = ktime_to_ns(fence->timestamp);
- } else {
- *status = CS_WAIT_STATUS_BUSY;
+ rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
+ hl_fence_put(fence);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+/*
+ * hl_wait_multi_cs_completion_init - init completion structure
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
+ * master QID to wait on
+ *
+ * @return valid completion struct pointer on success, otherwise error pointer
+ *
+ * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
+ * The function takes the first available completion structure (by marking it
+ * "used") and initializes its values.
+ */
+static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
+ struct hl_device *hdev,
+ u8 stream_master_bitmap)
+{
+ struct multi_cs_completion *mcs_compl;
+ int i;
+
+ /* find free multi_cs completion structure */
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_compl = &hdev->multi_cs_completion[i];
+ spin_lock(&mcs_compl->lock);
+ if (!mcs_compl->used) {
+ mcs_compl->used = 1;
+ mcs_compl->timestamp = 0;
+ mcs_compl->stream_master_qid_map = stream_master_bitmap;
+ reinit_completion(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ break;
}
+ spin_unlock(&mcs_compl->lock);
+ }
- if (fence->error == -ETIMEDOUT)
- rc = -ETIMEDOUT;
- else if (fence->error == -EIO)
- rc = -EIO;
+ if (i == MULTI_CS_MAX_USER_CTX) {
+ dev_err(hdev->dev,
+ "no available multi-CS completion structure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return mcs_compl;
+}
- hl_fence_put(fence);
- } else {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
- seq, ctx->cs_sequence);
- *status = CS_WAIT_STATUS_GONE;
+/*
+ * hl_wait_multi_cs_completion_fini - return completion structure and set as
+ * unused
+ *
+ * @mcs_compl: pointer to the completion structure
+ */
+static void hl_wait_multi_cs_completion_fini(
+ struct multi_cs_completion *mcs_compl)
+{
+ /*
+	 * free the completion structure; do it under the lock to stay in sync
+	 * with the thread that signals the completion
+ */
+ spin_lock(&mcs_compl->lock);
+ mcs_compl->used = 0;
+ spin_unlock(&mcs_compl->lock);
+}
+
+/*
+ * hl_wait_multi_cs_completion - wait for first CS to complete
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
+{
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ struct multi_cs_completion *mcs_compl;
+ long completion_rc;
+
+ mcs_compl = hl_wait_multi_cs_completion_init(hdev,
+ mcs_data->stream_master_qid_map);
+ if (IS_ERR(mcs_compl))
+ return PTR_ERR(mcs_compl);
+
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &mcs_compl->completion,
+ usecs_to_jiffies(mcs_data->timeout_us));
+
+ /* update timestamp */
+ if (completion_rc > 0)
+ mcs_data->timestamp = mcs_compl->timestamp;
+
+ hl_wait_multi_cs_completion_fini(mcs_compl);
+
+ mcs_data->wait_status = completion_rc;
+
+ return 0;
+}
+
+/*
+ * hl_multi_cs_completion_init - init array of multi-CS completion structures
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+void hl_multi_cs_completion_init(struct hl_device *hdev)
+{
+ struct multi_cs_completion *mcs_cmpl;
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_cmpl = &hdev->multi_cs_completion[i];
+ mcs_cmpl->used = 0;
+ spin_lock_init(&mcs_cmpl->lock);
+ init_completion(&mcs_cmpl->completion);
+ }
+}
+
+/*
+ * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
+ *
+ * @hpriv: pointer to the private data of the fd
+ * @data: pointer to multi-CS wait ioctl in/out args
+ *
+ */
+static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct multi_cs_data mcs_data = {0};
+ union hl_wait_cs_args *args = data;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_fence **fence_arr;
+ void __user *seq_arr;
+ u32 size_to_copy;
+ u64 *cs_seq_arr;
+ u8 seq_arr_len;
+ int rc;
+
+ if (!hdev->supports_wait_for_multi_cs) {
+ dev_err(hdev->dev, "Wait for multi CS is not supported\n");
+ return -EPERM;
+ }
+
+ seq_arr_len = args->in.seq_arr_len;
+
+ if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
+ dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
+ HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
+ return -EINVAL;
+ }
+
+ /* allocate memory for sequence array */
+ cs_seq_arr =
+ kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
+ if (!cs_seq_arr)
+ return -ENOMEM;
+
+ /* copy CS sequence array from user */
+ seq_arr = (void __user *) (uintptr_t) args->in.seq;
+ size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
+ if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
+ rc = -EFAULT;
+ goto free_seq_arr;
+ }
+
+ /* allocate array for the fences */
+ fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
+ if (!fence_arr) {
+ rc = -ENOMEM;
+ goto free_seq_arr;
+ }
+
+ /* initialize the multi-CS internal data */
+ mcs_data.ctx = ctx;
+ mcs_data.seq_arr = cs_seq_arr;
+ mcs_data.fence_arr = fence_arr;
+ mcs_data.arr_len = seq_arr_len;
+
+ hl_ctx_get(hdev, ctx);
+
+ /* poll all CS fences, extract timestamp */
+ mcs_data.update_ts = true;
+ rc = hl_cs_poll_fences(&mcs_data);
+ /*
+ * skip wait for CS completion when one of the below is true:
+ * - an error on the poll function
+ * - one or more CS in the list completed
+ * - the user called ioctl with timeout 0
+ */
+ if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
+ goto put_ctx;
+
+ /* wait (with timeout) for the first CS to be completed */
+ mcs_data.timeout_us = args->in.timeout_us;
+ rc = hl_wait_multi_cs_completion(&mcs_data);
+ if (rc)
+ goto put_ctx;
+
+ if (mcs_data.wait_status > 0) {
+ /*
+ * poll fences once again to update the CS map.
+ * no timestamp should be updated this time.
+ */
+ mcs_data.update_ts = false;
+ rc = hl_cs_poll_fences(&mcs_data);
+
+ /*
+ * if hl_wait_multi_cs_completion returned before timeout (i.e.
+ * it got a completion) we expect to see at least one CS
+ * completed after the poll function.
+ */
+ if (!mcs_data.completion_bitmap) {
+ dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ rc = -EFAULT;
+ }
}
+put_ctx:
hl_ctx_put(ctx);
+ kfree(fence_arr);
- return rc;
+free_seq_arr:
+ kfree(cs_seq_arr);
+
+ /* update output args */
+ memset(args, 0, sizeof(*args));
+ if (rc)
+ return rc;
+
+ if (mcs_data.completion_bitmap) {
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+ args->out.cs_completion_map = mcs_data.completion_bitmap;
+
+		/* if the timestamp is not 0, it's valid */
+ if (mcs_data.timestamp) {
+ args->out.timestamp_nsec = mcs_data.timestamp;
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+ }
+
+ /* update if some CS was gone */
+		if (mcs_data.gone_cs)
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
+ } else if (mcs_data.wait_status == -ERESTARTSYS) {
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ } else {
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ }
+
+ return 0;
}
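
[Editor's note: a hedged userspace sketch of the new multi-CS wait, assuming the uAPI shown in this patch (HL_WAIT_CS_FLAGS_MULTI_CS, seq/seq_arr_len/timeout_us in; status/cs_completion_map out). The sequence numbers and device path are placeholders.]

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>

    int main(void)
    {
    	union hl_wait_cs_args args;
    	uint64_t seq_arr[2] = { 7, 9 }; /* placeholder CS sequence numbers */
    	int fd = open("/dev/hl0", O_RDWR); /* placeholder device node */

    	if (fd < 0)
    		return 1;

    	memset(&args, 0, sizeof(args));
    	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
    	args.in.seq = (uint64_t)(uintptr_t)seq_arr;
    	args.in.seq_arr_len = 2;
    	args.in.timeout_us = 1000000; /* 1 second */

    	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args))
    		return 1;

    	if (args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
    		printf("completion map: 0x%llx\n",
    		       (unsigned long long)args.out.cs_completion_map);
    	return 0;
    }
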
static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2015,9 +2715,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
{
struct hl_user_pending_interrupt *pend;
struct hl_user_interrupt *interrupt;
- unsigned long timeout;
- long completion_rc;
+ unsigned long timeout, flags;
u32 completion_value;
+ long completion_rc;
int rc = 0;
if (timeout_us == U32_MAX)
@@ -2040,17 +2740,10 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
- spin_lock(&interrupt->wait_list_lock);
- if (!hl_device_operational(hdev, NULL)) {
- rc = -EPERM;
- goto unlock_and_free_fence;
- }
-
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto unlock_and_free_fence;
+ goto free_fence;
}
if (completion_value >= target_value)
@@ -2059,48 +2752,57 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto unlock_and_free_fence;
+ goto free_fence;
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
wait_again:
/* Wait for interrupt handler to signal completion */
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &pend->fence.completion, timeout);
+ completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+ timeout);
/* If timeout did not expire we need to perform the comparison.
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- if (copy_from_user(&completion_value,
- u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
+
goto remove_pending_user_interrupt;
}
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ reinit_completion(&pend->fence.completion);
timeout = completion_rc;
+
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
+ } else if (completion_rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for interrupt ID %d\n",
+ interrupt->interrupt_id);
+ *status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
} else {
*status = CS_WAIT_STATUS_BUSY;
}
remove_pending_user_interrupt:
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_del(&pend->wait_list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-unlock_and_free_fence:
- spin_unlock(&interrupt->wait_list_lock);
+free_fence:
kfree(pend);
hl_ctx_put(ctx);
@@ -2148,8 +2850,9 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "interrupt_wait_ioctl failed (%d)\n", rc);
+ if (rc != -EINTR)
+ dev_err_ratelimited(hdev->dev,
+ "interrupt_wait_ioctl failed (%d)\n", rc);
return rc;
}
@@ -2173,8 +2876,16 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
u32 flags = args->in.flags;
int rc;
+ /* If the device is not operational, no point in waiting for any command submission or
+ * user interrupt
+ */
+ if (!hl_device_operational(hpriv->hdev, NULL))
+ return -EPERM;
+
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
+ else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
+ rc = hl_multi_cs_wait_ioctl(hpriv, data);
else
rc = hl_cs_wait_ioctl(hpriv, data);
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 19b6b045219e..22978303ad63 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -9,16 +9,70 @@
#include <linux/slab.h>
+void hl_encaps_handle_do_release(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_handle_do_release_sob(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+	/* If we're here, then there was a signals reservation but a CS with
+	 * encaps signals was never submitted, so we need to put the hw_sob
+	 * refcount taken at reservation time.
+	 */
+ hw_sob_put(handle->hw_sob);
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
+{
+ spin_lock_init(&mgr->lock);
+ idr_init(&mgr->handles);
+}
+
+static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
+ struct hl_encaps_signals_mgr *mgr)
+{
+ struct hl_cs_encaps_sig_handle *handle;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->handles;
+
+ if (!idr_is_empty(idp)) {
+ dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
+ idr_for_each_entry(idp, handle, id)
+ kref_put(&handle->refcount,
+ hl_encaps_handle_do_release_sob);
+ }
+
+ idr_destroy(&mgr->handles);
+}
+
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int i;
- /* Release all allocated pending cb's, those cb's were never
- * scheduled so it is safe to release them here
- */
- hl_pending_cb_list_flush(ctx);
-
/* Release all allocated HW block mapped list entries and destroy
* the mutex.
*/
@@ -53,6 +107,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
+ hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
/* Scrub both SRAM and DRAM */
hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
@@ -130,9 +185,6 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
return;
-
- dev_warn(hdev->dev,
- "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -144,11 +196,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
- INIT_LIST_HEAD(&ctx->pending_cb_list);
- spin_lock_init(&ctx->pending_cb_lock);
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
- atomic_set(&ctx->thread_pending_cb_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
@@ -200,6 +249,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
goto err_cb_va_pool_fini;
}
+ hl_encaps_sig_mgr_init(&ctx->sig_mgr);
+
dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
}
@@ -229,25 +280,40 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
-struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+/*
+ * hl_ctx_get_fence_locked - get CS fence under CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq: CS sequences number
+ *
+ * @return valid fence pointer on success, NULL if fence is gone, otherwise
+ * error pointer.
+ *
+ * NOTE: this function shall be called with cs_lock locked
+ */
+static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct hl_fence *fence;
- spin_lock(&ctx->cs_lock);
-
- if (seq >= ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq >= ctx->cs_sequence)
return ERR_PTR(-EINVAL);
- }
- if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
return NULL;
- }
fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
hl_fence_get(fence);
+ return fence;
+}
+
+struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+{
+ struct hl_fence *fence;
+
+ spin_lock(&ctx->cs_lock);
+
+ fence = hl_ctx_get_fence_locked(ctx, seq);
spin_unlock(&ctx->cs_lock);
@@ -255,6 +321,46 @@ struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
}
/*
+ * hl_ctx_get_fences - get multiple CS fences under the same CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq_arr: array of CS sequences to wait for
+ * @fence: fence array to store the CS fences
+ * @arr_len: length of seq_arr and fence_arr
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len)
+{
+ struct hl_fence **fence_arr_base = fence;
+ int i, rc = 0;
+
+ spin_lock(&ctx->cs_lock);
+
+ for (i = 0; i < arr_len; i++, fence++) {
+ u64 seq = seq_arr[i];
+
+ *fence = hl_ctx_get_fence_locked(ctx, seq);
+
+ if (IS_ERR(*fence)) {
+ dev_err(ctx->hdev->dev,
+ "Failed to get fence for CS with seq 0x%llx\n",
+ seq);
+ rc = PTR_ERR(*fence);
+ break;
+ }
+ }
+
+ spin_unlock(&ctx->cs_lock);
+
+ if (rc)
+ hl_fences_put(fence_arr_base, i);
+
+ return rc;
+}
+
+/*
* hl_ctx_mgr_init - initialize the context manager
*
* @mgr: pointer to context manager structure
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 703d79fb6f3f..985f1f3dbd20 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -209,12 +209,12 @@ static int userptr_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, " pid user virtual address size dma dir\n");
seq_puts(s, "----------------------------------------------------------\n");
}
- seq_printf(s,
- " 0x%-14llx %-10u %-30s\n",
- userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ seq_printf(s, " %-7d 0x%-14llx %-10llu %-30s\n",
+ userptr->pid, userptr->addr, userptr->size,
+ dma_dir[userptr->dir]);
}
spin_unlock(&dev_entry->userptr_spinlock);
@@ -235,7 +235,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool once = true;
u64 j;
int i;
@@ -261,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
if (*vm_type == VM_TYPE_USERPTR) {
userptr = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u\n",
+ " 0x%-14llx %-10llu\n",
hnode->vaddr, userptr->size);
} else {
phys_pg_pack = hnode->ptr;
@@ -320,6 +320,77 @@ static int vm_show(struct seq_file *s, void *data)
return 0;
}
+static int userptr_lookup_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct scatterlist *sg;
+ struct hl_userptr *userptr;
+ bool first = true;
+ u64 total_npages, npages, sg_start, sg_end;
+ dma_addr_t dma_addr;
+ int i;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (dev_entry->userptr_lookup >= userptr->addr &&
+ dev_entry->userptr_lookup < userptr->addr + userptr->size) {
+ total_npages = 0;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
+ i) {
+ npages = hl_get_sg_info(sg, &dma_addr);
+ sg_start = userptr->addr +
+ total_npages * PAGE_SIZE;
+ sg_end = userptr->addr +
+ (total_npages + npages) * PAGE_SIZE;
+
+ if (dev_entry->userptr_lookup >= sg_start &&
+ dev_entry->userptr_lookup < sg_end) {
+ dma_addr += (dev_entry->userptr_lookup -
+ sg_start);
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address dma address pid region start region size\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------\n");
+ }
+ seq_printf(s, " 0x%-18llx 0x%-16llx %-8u 0x%-16llx %-12llu\n",
+ dev_entry->userptr_lookup,
+ (u64)dma_addr, userptr->pid,
+ userptr->addr, userptr->size);
+ }
+ total_npages += npages;
+ }
+ }
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ ssize_t rc;
+ u64 value;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ dev_entry->userptr_lookup = value;
+
+ return count;
+}
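
[Editor's note: the lookup in userptr_lookup_show() walks the pinned range's scatter-gather segments, accumulating page counts to find which segment covers the requested VA. A userspace model with hypothetical segments and addresses:]

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct sg_seg {
    	uint64_t dma_addr; /* DMA address of the segment */
    	uint64_t npages;   /* pages covered by the segment */
    };

    int main(void)
    {
    	struct sg_seg sgt[] = { { 0x80000000, 2 }, { 0x90000000, 3 } };
    	uint64_t base = 0x7f0000000000;               /* pinned range VA */
    	uint64_t lookup = base + 3 * PAGE_SIZE + 0x10; /* VA to translate */
    	uint64_t total_npages = 0;

    	for (unsigned int i = 0; i < sizeof(sgt) / sizeof(sgt[0]); i++) {
    		uint64_t sg_start = base + total_npages * PAGE_SIZE;
    		uint64_t sg_end = sg_start + sgt[i].npages * PAGE_SIZE;

    		if (lookup >= sg_start && lookup < sg_end) {
    			printf("dma addr: 0x%llx\n", (unsigned long long)
    			       (sgt[i].dma_addr + (lookup - sg_start)));
    			break;
    		}
    		total_npages += sgt[i].npages;
    	}
    	return 0;
    }
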
+
static int mmu_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
@@ -349,7 +420,7 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
- phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
+ hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
if (hops_info.scrambled_vaddr &&
(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
@@ -491,11 +562,10 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_ctx *ctx = hdev->compute_ctx;
struct hl_vm_hash_node *hnode;
+ u64 end_address, range_size;
struct hl_userptr *userptr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool valid = false;
- u64 end_address;
- u32 range_size;
int i, rc = 0;
if (!ctx) {
@@ -1043,6 +1113,60 @@ static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
return 0;
}
+static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ ssize_t rc;
+
+ down_read(&entry->state_dump_sem);
+ if (!entry->state_dump[entry->state_dump_head])
+ rc = 0;
+ else
+ rc = simple_read_from_buffer(
+ buf, count, ppos,
+ entry->state_dump[entry->state_dump_head],
+ strlen(entry->state_dump[entry->state_dump_head]));
+ up_read(&entry->state_dump_sem);
+
+ return rc;
+}
+
+static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ ssize_t rc;
+ u32 size;
+ int i;
+
+ rc = kstrtouint_from_user(buf, count, 10, &size);
+ if (rc)
+ return rc;
+
+ if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
+ dev_err(hdev->dev, "Invalid number of dumps to skip\n");
+ return -EINVAL;
+ }
+
+ if (entry->state_dump[entry->state_dump_head]) {
+ down_write(&entry->state_dump_sem);
+ for (i = 0; i < size; ++i) {
+ vfree(entry->state_dump[entry->state_dump_head]);
+ entry->state_dump[entry->state_dump_head] = NULL;
+ if (entry->state_dump_head > 0)
+ entry->state_dump_head--;
+ else
+ entry->state_dump_head =
+ ARRAY_SIZE(entry->state_dump) - 1;
+ }
+ up_write(&entry->state_dump_sem);
+ }
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1110,12 +1234,19 @@ static const struct file_operations hl_security_violations_fops = {
.read = hl_security_violations_read
};
+static const struct file_operations hl_state_dump_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_state_dump_read,
+ .write = hl_state_dump_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
+ {"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
@@ -1172,6 +1303,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
INIT_LIST_HEAD(&dev_entry->userptr_list);
INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
mutex_init(&dev_entry->file_mutex);
+ init_rwsem(&dev_entry->state_dump_sem);
spin_lock_init(&dev_entry->cb_spinlock);
spin_lock_init(&dev_entry->cs_spinlock);
spin_lock_init(&dev_entry->cs_job_spinlock);
@@ -1283,6 +1415,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&hdev->skip_reset_on_timeout);
+ debugfs_create_file("state_dump",
+ 0600,
+ dev_entry->root,
+ dev_entry,
+ &hl_state_dump_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
@@ -1297,6 +1435,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
void hl_debugfs_remove_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+ int i;
debugfs_remove_recursive(entry->root);
@@ -1304,6 +1443,9 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
vfree(entry->blob_desc.data);
+ for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
+ vfree(entry->state_dump[i]);
+
kfree(entry->entry_arr);
}
@@ -1416,6 +1558,28 @@ void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}
+/**
+ * hl_debugfs_set_state_dump - register state dump making it accessible via
+ * debugfs
+ * @hdev: pointer to the device structure
+ * @data: the actual dump data
+ * @length: the length of the data
+ */
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ down_write(&dev_entry->state_dump_sem);
+
+ dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
+ ARRAY_SIZE(dev_entry->state_dump);
+ vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
+ dev_entry->state_dump[dev_entry->state_dump_head] = data;
+
+ up_write(&dev_entry->state_dump_sem);
+}
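Taken together, hl_state_dump_write() and hl_debugfs_set_state_dump() implement a small history ring over state_dump[]: setting a dump advances the head and evicts whatever it lands on, while a write of N to the debugfs node drops the newest N dumps so that reads expose older ones. A minimal user-space model of both directions, as a sketch only (HIST_LEN stands in for HL_STATE_DUMP_HIST_LEN, malloc/free replace vmalloc/vfree, and locking is omitted):

#include <stdio.h>
#include <stdlib.h>

#define HIST_LEN 5			/* stands in for HL_STATE_DUMP_HIST_LEN */

static char *dumps[HIST_LEN];
static unsigned int head;		/* index of the newest dump */

/* mirrors hl_debugfs_set_state_dump(): advance head, evict what it lands on */
static void push_dump(char *data)
{
	head = (head + 1) % HIST_LEN;
	free(dumps[head]);
	dumps[head] = data;
}

/* mirrors hl_state_dump_write(): drop the newest n dumps, exposing older ones */
static void skip_dumps(unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		free(dumps[head]);
		dumps[head] = NULL;
		head = head ? head - 1 : HIST_LEN - 1;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		char *d = malloc(16);

		snprintf(d, 16, "dump %d", i);
		push_dump(d);
	}
	skip_dumps(1);			/* reads would now return "dump 1" */
	printf("%s\n", dumps[head] ? dumps[head] : "(empty)");
	return 0;
}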
+
void __init hl_debugfs_init(void)
{
hl_debug_root = debugfs_create_dir("habanalabs", NULL);
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index ff4cbde289c0..97c7c86580e6 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -7,11 +7,11 @@
#define pr_fmt(fmt) "habanalabs: " fmt
+#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <uapi/misc/habanalabs.h>
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
@@ -23,6 +23,8 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
status = HL_DEVICE_STATUS_NEEDS_RESET;
else if (hdev->disabled)
status = HL_DEVICE_STATUS_MALFUNCTION;
+ else if (!hdev->init_done)
+ status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -44,6 +46,7 @@ bool hl_device_operational(struct hl_device *hdev,
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
case HL_DEVICE_STATUS_OPERATIONAL:
+ case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
@@ -129,8 +132,8 @@ static int hl_device_release(struct inode *inode, struct file *filp)
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
if (!hl_hpriv_put(hpriv))
- dev_warn(hdev->dev,
- "Device is still in use because there are live CS and/or memory mappings\n");
+ dev_notice(hdev->dev,
+ "User process closed FD but device still in use\n");
hdev->last_open_session_duration_jif =
jiffies - hdev->last_successful_open_jif;
@@ -308,9 +311,15 @@ static void device_hard_reset_pending(struct work_struct *work)
container_of(work, struct hl_device_reset_work,
reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
+ u32 flags;
int rc;
- rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD);
+ flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;
+
+ if (device_reset_work->fw_reset)
+ flags |= HL_RESET_FW;
+
+ rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
dev_info(hdev->dev,
"Could not reset device. will try again in %u seconds",
@@ -682,6 +691,44 @@ out:
return rc;
}
+static void take_release_locks(struct hl_device *hdev)
+{
+	/* Flush anyone that is inside the critical section of enqueuing
+	 * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ /* Flush processes that are sending message to CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
+}
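The lock/unlock pairs in take_release_locks() act as flush barriers rather than critical sections: once the lock has been taken and immediately dropped, any thread that was inside the protected region when the flush started has left it (new entrants are fenced off by hdev->disabled, which is set beforehand). A minimal pthreads sketch of the idiom, with illustrative names that are not part of the driver:

#include <pthread.h>

static pthread_mutex_t enqueue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Blocks until every thread that held enqueue_lock when we were called
 * has released it; nothing is held on return.
 */
static void flush_enqueuers(void)
{
	pthread_mutex_lock(&enqueue_lock);
	pthread_mutex_unlock(&enqueue_lock);
}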
+
+static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+{
+ if (hard_reset)
+ device_late_fini(hdev);
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ /* Release all pending user interrupts, each pending user interrupt
+ * holds a reference to user context
+ */
+ hl_release_pending_user_interrupts(hdev);
+}
+
/*
* hl_device_suspend - initiate device suspend
*
@@ -707,16 +754,7 @@ int hl_device_suspend(struct hl_device *hdev)
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush processes that are sending message to CPU */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
+ take_release_locks(hdev);
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
@@ -819,6 +857,11 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
usleep_range(1000, 10000);
put_task_struct(task);
+ } else {
+ dev_warn(hdev->dev,
+ "Can't get task struct for PID so giving up on killing process\n");
+ mutex_unlock(&hdev->fpriv_list_lock);
+ return -ETIME;
}
}
@@ -885,7 +928,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread, hard_instead_soft = false;
+ bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
int i, rc;
if (!hdev->init_done) {
@@ -894,8 +937,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
return 0;
}
- hard_reset = (flags & HL_RESET_HARD) != 0;
- from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
+ hard_reset = !!(flags & HL_RESET_HARD);
+ from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
+ fw_reset = !!(flags & HL_RESET_FW);
if (!hard_reset && !hdev->supports_soft_reset) {
hard_instead_soft = true;
@@ -947,11 +991,13 @@ do_reset:
else
hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- /*
- * if reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it
+	/* If reset is due to heartbeat, device CPU is not responsive,
+	 * in which case there is no point sending PCI disable message to it.
+ *
+ * If F/W is performing the reset, no need to send it a message to disable
+ * PCI access
*/
- if (hard_reset && !(flags & HL_RESET_HEARTBEAT)) {
+ if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
		/* Disable PCI access from device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -970,15 +1016,7 @@ do_reset:
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
dev_err(hdev->dev, "Going to RESET device!\n");
}
@@ -989,6 +1027,8 @@ again:
hdev->process_kill_trial_cnt = 0;
+ hdev->device_reset_work.fw_reset = fw_reset;
+
/*
* Because the reset function can't run from heartbeat work,
* we need to call the reset function from a dedicated work.
@@ -999,31 +1039,7 @@ again:
return 0;
}
- if (hard_reset) {
- device_late_fini(hdev);
-
- /*
- * Now that the heartbeat thread is closed, flush processes
- * which are sending messages to CPU
- */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
- }
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, hard_reset);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
-
- /* Release all pending user interrupts, each pending user interrupt
- * holds a reference to user context
- */
- hl_release_pending_user_interrupts(hdev);
+ cleanup_resources(hdev, hard_reset, fw_reset);
kill_processes:
if (hard_reset) {
@@ -1057,12 +1073,15 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, hard_reset);
+ hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
+ hdev->fw_loader.linux_loaded = false;
+
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
@@ -1292,6 +1311,10 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto user_interrupts_fini;
+
+ /* initialize completion structure for multi CS wait */
+ hl_multi_cs_completion_init(hdev);
+
/*
* Initialize the H/W queues. Must be done before hw_init, because
* there the addresses of the kernel queue are being written to the
@@ -1361,6 +1384,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->compute_ctx = NULL;
+ hdev->asic_funcs->state_dump_init(hdev);
+
hl_debugfs_add_device(hdev);
/* debugfs nodes are created in hl_ctx_init so it must be called after
@@ -1567,31 +1592,13 @@ void hl_device_fini(struct hl_device *hdev)
/* Mark device as disabled */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
hdev->hard_reset_pending = true;
hl_hwmon_fini(hdev);
- device_late_fini(hdev);
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, true);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
+ cleanup_resources(hdev, true, false);
/* Kill processes here after CS rollback. This is because the process
* can't really exit until all its CSs are done, which is what we
@@ -1610,7 +1617,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+
+ hdev->fw_loader.linux_loaded = false;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 2e4d04ec6b53..8d2568c63f19 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -240,11 +240,15 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* set fence to a non valid value */
pkt->fence = cpu_to_le32(UINT_MAX);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
- goto out;
- }
+ /*
+ * The CPU queue is a synchronous queue with an effective depth of
+ * a single entry (although it is allocated with room for multiple
+	 * entries). We lock on it using 'send_cpu_message_lock', which
+	 * serializes accesses to the CPU queue, so we don't need to lock
+	 * the entire H/W queues module when submitting a JOB to the CPU
+	 * queue.
+ */
+ hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
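The comment added above captures why the CPU queue can bypass hw_queues_lock: a single mutex around a one-entry, submit-then-ack cycle already serializes all users. A rough user-space model of that design, purely as a sketch (none of these names are driver API):

#include <pthread.h>

struct cpu_queue {
	pthread_mutex_t msg_lock;	/* plays the role of send_cpu_message_lock */
	unsigned int pi;		/* producer index, one BD in flight at a time */
};

static unsigned int send_cpu_message(struct cpu_queue *q)
{
	unsigned int pi;

	pthread_mutex_lock(&q->msg_lock);
	pi = ++q->pi;		/* submit a single BD ... */
	/* ... a real driver would now poll the fence until the CPU acks */
	pthread_mutex_unlock(&q->msg_lock);
	return pi;
}

int main(void)
{
	struct cpu_queue q = { .msg_lock = PTHREAD_MUTEX_INITIALIZER, .pi = 0 };

	return send_cpu_message(&q) == 1 ? 0 : 1;
}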
@@ -663,17 +667,15 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
hdev->event_queue.check_eqe_index = false;
/* Read FW application security bits again */
- if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid) {
- hdev->asic_prop.fw_app_cpu_boot_dev_sts0 =
- RREG32(sts_boot_dev_sts0_reg);
- if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ if (prop->fw_cpu_boot_dev_sts0_valid) {
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
+ if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
hdev->event_queue.check_eqe_index = true;
}
- if (hdev->asic_prop.fw_cpu_boot_dev_sts1_valid)
- hdev->asic_prop.fw_app_cpu_boot_dev_sts1 =
- RREG32(sts_boot_dev_sts1_reg);
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
@@ -1008,6 +1010,11 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
+
+ /* Must clear this register in order to prevent preboot
+ * from reading WFE after reboot
+ */
+ WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
}
hdev->device_cpu_is_halted = true;
@@ -1055,6 +1062,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
dev_err(hdev->dev,
"Device boot progress - Thermal Sensor initialization failed\n");
break;
+ case CPU_BOOT_STATUS_SECURITY_READY:
+ dev_err(hdev->dev,
+ "Device boot progress - Stuck in preboot after security initialization\n");
+ break;
default:
dev_err(hdev->dev,
"Device boot progress - Invalid status code %d\n",
@@ -1238,11 +1249,6 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
* b. Check whether hard reset is done by boot cpu
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
- *
- * Preboot:
- * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED). If set, then-
- * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
- * If set, then mark GIC controller to be disabled.
*/
prop->hard_reset_done_by_fw =
!!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
@@ -1953,8 +1959,8 @@ static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
if (!hdev->asic_prop.gic_interrupts_enable &&
!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
- dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_irq_ctrl;
- dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_irq_ctrl;
+ dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
+ dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
dev_warn(hdev->dev,
"Using a single interrupt interface towards cpucp");
@@ -2122,8 +2128,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
- prop->fw_app_cpu_boot_dev_sts0 =
- RREG32(cpu_boot_dev_sts0_reg);
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
@@ -2143,8 +2148,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
}
if (prop->fw_cpu_boot_dev_sts1_valid) {
- prop->fw_app_cpu_boot_dev_sts1 =
- RREG32(cpu_boot_dev_sts1_reg);
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
dev_dbg(hdev->dev,
"Firmware application CPU status1 %#x\n",
@@ -2235,6 +2239,10 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
+ /*
+	 * In this stage, "cpu_dyn_regs" contains only LKD's hard-coded values!
+ * It will be updated from FW after hl_fw_dynamic_request_descriptor().
+ */
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6b3cdd7e068a..bebebcb163ee 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/debugfs.h>
+#include <linux/rwsem.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -65,6 +66,11 @@
#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
+#define HL_STATE_DUMP_HIST_LEN 5
+
+#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -122,12 +128,17 @@ enum hl_mmu_page_table_location {
*
* - HL_RESET_DEVICE_RELEASE
* Set if reset is due to device release
+ *
+ * - HL_RESET_FW
+ * F/W will perform the reset. No need to ask it to reset the device. This is relevant
+ * only when running with secured f/w
*/
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
#define HL_RESET_TDR (1 << 3)
#define HL_RESET_DEVICE_RELEASE (1 << 4)
+#define HL_RESET_FW (1 << 5)
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -236,7 +247,9 @@ enum hl_cs_type {
CS_TYPE_DEFAULT,
CS_TYPE_SIGNAL,
CS_TYPE_WAIT,
- CS_TYPE_COLLECTIVE_WAIT
+ CS_TYPE_COLLECTIVE_WAIT,
+ CS_RESERVE_SIGNALS,
+ CS_UNRESERVE_SIGNALS
};
/*
@@ -281,13 +294,17 @@ enum queue_cb_alloc_flags {
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
* @sob_id: id of this SOB.
+ * @sob_addr: the sob offset from the base address.
* @q_idx: the H/W queue that uses this SOB.
+ * @need_reset: reset indication set when switching to the other sob.
*/
struct hl_hw_sob {
struct hl_device *hdev;
struct kref kref;
u32 sob_id;
+ u32 sob_addr;
u32 q_idx;
+ bool need_reset;
};
enum hl_collective_mode {
@@ -317,11 +334,11 @@ struct hw_queue_properties {
};
/**
- * enum vm_type_t - virtual memory mapping request information.
+ * enum vm_type - virtual memory mapping request information.
* @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
-enum vm_type_t {
+enum vm_type {
VM_TYPE_USERPTR = 0x1,
VM_TYPE_PHYS_PACK = 0x2
};
@@ -382,6 +399,16 @@ struct hl_mmu_properties {
};
/**
+ * struct hl_hints_range - hint addresses reserved va range.
+ * @start_addr: start address of the va range.
+ * @end_addr: end address of the va range.
+ */
+struct hl_hints_range {
+ u64 start_addr;
+ u64 end_addr;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
@@ -392,6 +419,10 @@ struct hl_mmu_properties {
* @pmmu: PCI (host) MMU address translation properties.
* @pmmu_huge: PCI (host) MMU address translation properties for memory
* allocated with huge pages.
+ * @hints_dram_reserved_va_range: dram hint addresses reserved range.
+ * @hints_host_reserved_va_range: host hint addresses reserved range.
+ * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
+ * range.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -412,6 +443,10 @@ struct hl_mmu_properties {
* to the device's MMU.
* @cb_va_end_addr: virtual end address of command buffers which are mapped to
* the device's MMU.
+ * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
+ * for hints validity check.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ * to enable the device to access them.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -459,6 +494,8 @@ struct hl_mmu_properties {
* reserved for the user
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
+ * @server_type: Server type that the ASIC is currently installed in.
+ * The value is according to enum hl_server_type in uapi file.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_enabled: true if security measures are enabled in firmware,
@@ -470,6 +507,7 @@ struct hl_mmu_properties {
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
* @num_functional_hbms: number of functional HBMs in each DCORE.
 * @hints_range_reservation: whether the device supports hint address range reservation.
* @iatu_done_by_fw: true if iATU configuration is being done by FW.
 * @dynamic_fw_load: true if dynamic FW load is supported.
* @gic_interrupts_enable: true if FW is not blocking GIC controller,
@@ -483,6 +521,9 @@ struct asic_fixed_properties {
struct hl_mmu_properties dmmu;
struct hl_mmu_properties pmmu;
struct hl_mmu_properties pmmu_huge;
+ struct hl_hints_range hints_dram_reserved_va_range;
+ struct hl_hints_range hints_host_reserved_va_range;
+ struct hl_hints_range hints_host_hpage_reserved_va_range;
u64 sram_base_address;
u64 sram_end_address;
u64 sram_user_base_address;
@@ -500,6 +541,8 @@ struct asic_fixed_properties {
u64 mmu_dram_default_page_addr;
u64 cb_va_start_addr;
u64 cb_va_end_addr;
+ u64 dram_hints_align_mask;
+ u64 device_dma_offset_for_host_access;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -534,6 +577,7 @@ struct asic_fixed_properties {
u16 first_available_user_msix_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
+ u16 server_type;
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_enabled;
@@ -542,6 +586,7 @@ struct asic_fixed_properties {
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
u8 num_functional_hbms;
+ u8 hints_range_reservation;
u8 iatu_done_by_fw;
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
@@ -552,40 +597,45 @@ struct asic_fixed_properties {
* @completion: fence is implemented using completion
* @refcount: refcount for this fence
* @cs_sequence: sequence of the corresponding command submission
+ * @stream_master_qid_map: bitmap of all stream-master QIDs that the
+ * multi-CS is waiting on
* @error: mark this fence with error
* @timestamp: timestamp upon completion
- *
*/
struct hl_fence {
struct completion completion;
struct kref refcount;
u64 cs_sequence;
+ u32 stream_master_qid_map;
int error;
ktime_t timestamp;
};
/**
* struct hl_cs_compl - command submission completion object.
- * @sob_reset_work: workqueue object to run SOB reset flow.
* @base_fence: hl fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
+ * @encaps_sig_hdl: encaps signals handler.
* @cs_seq: command submission sequence number.
* @type: type of the CS - signal/wait.
* @sob_val: the SOB value that is used in this signal/wait CS.
* @sob_group: the SOB group that is used in this collective wait CS.
+ * @encaps_signals: indication of whether this is a completion object of
+ * a CS with encaps signals.
*/
struct hl_cs_compl {
- struct work_struct sob_reset_work;
struct hl_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 cs_seq;
enum hl_cs_type type;
u16 sob_val;
u16 sob_group;
+ bool encaps_signals;
};
/*
@@ -698,6 +748,17 @@ struct hl_sync_stream_properties {
};
/**
+ * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
+ * handlers manager
+ * @lock: protects handles.
+ * @handles: an idr to hold all encapsulated signals handles.
+ */
+struct hl_encaps_signals_mgr {
+ spinlock_t lock;
+ struct idr handles;
+};
+
+/**
* struct hl_hw_queue - describes a H/W transport queue.
* @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
* @sync_stream_prop: sync stream queue properties
@@ -875,7 +936,7 @@ struct pci_mem_region {
u64 region_base;
u64 region_size;
u64 bar_size;
- u32 offset_in_bar;
+ u64 offset_in_bar;
u8 bar_id;
u8 used;
};
@@ -996,7 +1057,7 @@ struct fw_load_mgr {
* hw_fini and before CS rollback.
* @suspend: handles IP specific H/W or SW changes for suspend.
* @resume: handles IP specific H/W or SW changes for resume.
- * @cb_mmap: maps a CB.
+ * @mmap: maps memory.
* @ring_doorbell: increment PI on a given QMAN.
* @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
* function because the PQs are located in different memory areas
@@ -1101,6 +1162,10 @@ struct fw_load_mgr {
* generic f/w compatible PLL Indexes
* @init_firmware_loader: initialize data for FW loader.
* @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
+ * @state_dump_init: initialize constants required for state dump
+ * @get_sob_addr: get SOB base address offset.
+ * @set_pci_memory_regions: set properties of PCI memory regions
+ * @get_stream_master_qid_arr: get pointer to stream masters QID array
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1110,11 +1175,11 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
- void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
- void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
+ void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
+ void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
- int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
@@ -1210,10 +1275,11 @@ struct hl_asic_funcs {
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
void (*set_dma_mask_from_fw)(struct hl_device *hdev);
u64 (*get_device_time)(struct hl_device *hdev);
- void (*collective_wait_init_cs)(struct hl_cs *cs);
+ int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id);
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset);
u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
void (*ack_protection_bits_errors)(struct hl_device *hdev);
@@ -1226,6 +1292,10 @@ struct hl_asic_funcs {
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
void (*init_firmware_loader)(struct hl_device *hdev);
void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
+ void (*state_dump_init)(struct hl_device *hdev);
+ u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
+ void (*set_pci_memory_regions)(struct hl_device *hdev);
+ u32* (*get_stream_master_qid_arr)(void);
};
@@ -1283,20 +1353,6 @@ struct hl_cs_counters_atomic {
};
/**
- * struct hl_pending_cb - pending command buffer structure
- * @cb_node: cb node in pending cb list
- * @cb: command buffer to send in next submission
- * @cb_size: command buffer size
- * @hw_queue_id: destination queue id
- */
-struct hl_pending_cb {
- struct list_head cb_node;
- struct hl_cb *cb;
- u32 cb_size;
- u32 hw_queue_id;
-};
-
-/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1312,28 +1368,21 @@ struct hl_pending_cb {
* MMU hash or walking the PGT requires talking this lock.
* @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
- * pending_cb_list: list of pending command buffers waiting to be sent upon
- * next user command submission context.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
+ * @sig_mgr: encaps signals handle manager.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
* @dram_default_hops: array that holds all hops addresses needed for default
* DRAM mapping.
- * @pending_cb_lock: spinlock to protect pending cb list
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
* @thread_ctx_switch_token: token to prevent multiple threads of the same
* context from running the context switch phase.
* Only a single thread should run it.
- * @thread_pending_cb_token: token to prevent multiple threads from processing
- * the pending CB list. Only a single thread should
- * process the list since it is protected by a
- * spinlock and we don't want to halt the entire
- * command submission sequence.
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
* the context switch phase from moving to their
* execution phase before the context switch phase
@@ -1353,17 +1402,15 @@ struct hl_ctx {
struct mutex mmu_lock;
struct mutex hw_block_list_lock;
struct list_head debugfs_list;
- struct list_head pending_cb_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
+ struct hl_encaps_signals_mgr sig_mgr;
u64 cs_sequence;
u64 *dram_default_hops;
- spinlock_t pending_cb_lock;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
atomic_t thread_ctx_switch_token;
- atomic_t thread_pending_cb_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
u32 handle;
@@ -1394,20 +1441,22 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
+ * @pid: the pid of the user process owning the memory
* @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
struct hl_userptr {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
struct list_head job_node;
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
enum dma_data_direction dir;
struct list_head debugfs_list;
+ pid_t pid;
u64 addr;
- u32 size;
+ u64 size;
u8 dma_mapped;
};
@@ -1426,12 +1475,14 @@ struct hl_userptr {
* @mirror_node : node in device mirror list of command submissions.
* @staged_cs_node: node in the staged cs list.
* @debugfs_list: node in debugfs list of command submissions.
+ * @encaps_sig_hdl: holds the encaps signals handle.
* @sequence: the sequence number of this CS.
* @staged_sequence: the sequence of the staged submission this CS is part of,
* relevant only if staged_cs is set.
* @timeout_jiffies: cs timeout in jiffies.
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
+ * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -1445,6 +1496,7 @@ struct hl_userptr {
* @staged_cs: true if this CS is part of a staged submission.
* @skip_reset_on_timeout: true if we shall not reset the device in case
* timeout occurs (debug scenario).
+ * @encaps_signals: true if this CS has encaps reserved signals.
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1459,11 +1511,13 @@ struct hl_cs {
struct list_head mirror_node;
struct list_head staged_cs_node;
struct list_head debugfs_list;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
u64 submission_time_jiffies;
enum hl_cs_type type;
+ u32 encaps_sig_hdl_id;
u8 submitted;
u8 completed;
u8 timedout;
@@ -1474,6 +1528,7 @@ struct hl_cs {
u8 staged_first;
u8 staged_cs;
u8 skip_reset_on_timeout;
+ u8 encaps_signals;
};
/**
@@ -1493,6 +1548,8 @@ struct hl_cs {
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
+ * @encaps_sig_wait_offset: encapsulated signals offset, which allows the
+ * user to wait on part of the reserved signals.
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
@@ -1517,6 +1574,7 @@ struct hl_cs_job {
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
+ u32 encaps_sig_wait_offset;
u8 is_kernel_allocated_cb;
u8 contains_dma_pkt;
};
@@ -1613,7 +1671,7 @@ struct hl_vm_hw_block_list_node {
* @created_from_userptr: is product of host virtual address.
*/
struct hl_vm_phys_pg_pack {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
u64 *pages;
u64 npages;
u64 total_size;
@@ -1759,9 +1817,13 @@ struct hl_debugfs_entry {
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
* @blob_desc: descriptor of blob
+ * @state_dump: data of the system state in case of a bad CS.
+ * @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @userptr_lookup: the target user ptr to look up on demand.
* @mmu_asid: ASID to use while translating in mmu_show.
+ * @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
@@ -1783,14 +1845,149 @@ struct hl_dbg_device_entry {
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
struct debugfs_blob_wrapper blob_desc;
+ char *state_dump[HL_STATE_DUMP_HIST_LEN];
+ struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
+ u64 userptr_lookup;
u32 mmu_asid;
+ u32 state_dump_head;
u8 i2c_bus;
u8 i2c_addr;
u8 i2c_reg;
};
+/**
+ * struct hl_hw_obj_name_entry - single hw object name, member of
+ * hl_state_dump_specs
+ * @node: link to the containing hash table
+ * @name: hw object name
+ * @id: object identifier
+ */
+struct hl_hw_obj_name_entry {
+ struct hlist_node node;
+ const char *name;
+ u32 id;
+};
+
+enum hl_state_dump_specs_props {
+ SP_SYNC_OBJ_BASE_ADDR,
+ SP_NEXT_SYNC_OBJ_ADDR,
+ SP_SYNC_OBJ_AMOUNT,
+ SP_MON_OBJ_WR_ADDR_LOW,
+ SP_MON_OBJ_WR_ADDR_HIGH,
+ SP_MON_OBJ_WR_DATA,
+ SP_MON_OBJ_ARM_DATA,
+ SP_MON_OBJ_STATUS,
+ SP_MONITORS_AMOUNT,
+ SP_TPC0_CMDQ,
+ SP_TPC0_CFG_SO,
+ SP_NEXT_TPC,
+ SP_MME_CMDQ,
+ SP_MME_CFG_SO,
+ SP_NEXT_MME,
+ SP_DMA_CMDQ,
+ SP_DMA_CFG_SO,
+ SP_DMA_QUEUES_OFFSET,
+ SP_NUM_OF_MME_ENGINES,
+ SP_SUB_MME_ENG_NUM,
+ SP_NUM_OF_DMA_ENGINES,
+ SP_NUM_OF_TPC_ENGINES,
+ SP_ENGINE_NUM_OF_QUEUES,
+ SP_ENGINE_NUM_OF_STREAMS,
+ SP_ENGINE_NUM_OF_FENCES,
+ SP_FENCE0_CNT_OFFSET,
+ SP_FENCE0_RDATA_OFFSET,
+ SP_CP_STS_OFFSET,
+ SP_NUM_CORES,
+
+ SP_MAX
+};
+
+enum hl_sync_engine_type {
+ ENGINE_TPC,
+ ENGINE_DMA,
+ ENGINE_MME,
+};
+
+/**
+ * struct hl_mon_state_dump - represents a state dump of a single monitor
+ * @id: monitor id
+ * @wr_addr_low: address monitor will write to, low bits
+ * @wr_addr_high: address monitor will write to, high bits
+ * @wr_data: data monitor will write
+ * @arm_data: register value containing monitor configuration
+ * @status: monitor status
+ */
+struct hl_mon_state_dump {
+ u32 id;
+ u32 wr_addr_low;
+ u32 wr_addr_high;
+ u32 wr_data;
+ u32 arm_data;
+ u32 status;
+};
+
+/**
+ * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
+ * @engine_type: type of the engine
+ * @engine_id: id of the engine
+ * @sync_id: id of the sync object
+ */
+struct hl_sync_to_engine_map_entry {
+ struct hlist_node node;
+ enum hl_sync_engine_type engine_type;
+ u32 engine_id;
+ u32 sync_id;
+};
+
+/**
+ * struct hl_sync_to_engine_map - maps sync object id to associated engine id
+ * @tb: hash table containing the mapping, each element is of type
+ * struct hl_sync_to_engine_map_entry
+ */
+struct hl_sync_to_engine_map {
+ DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
+};
+
+/**
+ * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
+ * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
+ * @print_single_monitor: format monitor data as string
+ * @monitor_valid: return true if given monitor dump is valid
+ * @print_fences_single_engine: format fences data as string
+ */
+struct hl_state_dump_specs_funcs {
+ int (*gen_sync_to_engine_map)(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map);
+ int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+ int (*monitor_valid)(struct hl_mon_state_dump *mon);
+ int (*print_fences_single_engine)(struct hl_device *hdev,
+ u64 base_offset,
+ u64 status_base_offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id, char **buf,
+ size_t *size, size_t *offset);
+};
+
+/**
+ * struct hl_state_dump_specs - defines ASIC known hw objects names
+ * @so_id_to_str_tb: sync objects names index table
+ * @monitor_id_to_str_tb: monitors names index table
+ * @funcs: virtual functions used for state dump
+ * @sync_namager_names: readable names for sync manager if available (ex: N_E)
+ * @props: pointer to a per asic const props array required for state dump
+ */
+struct hl_state_dump_specs {
+ DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ struct hl_state_dump_specs_funcs funcs;
+ const char * const *sync_namager_names;
+ s64 *props;
+};
+
/*
* DEVICES
@@ -1798,7 +1995,7 @@ struct hl_dbg_device_entry {
#define HL_STR_MAX 32
-#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
+#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
* x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
@@ -1946,11 +2143,13 @@ struct hwmon_chip_info;
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
+ * @fw_reset: whether f/w will do the reset without us sending it a message to do it.
*/
struct hl_device_reset_work {
struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
+ bool fw_reset;
};
/**
@@ -2065,6 +2264,58 @@ struct hl_mmu_funcs {
};
/**
+ * number of user contexts allowed to call wait_for_multi_cs ioctl in
+ * parallel
+ */
+#define MULTI_CS_MAX_USER_CTX 2
+
+/**
+ * struct multi_cs_completion - multi CS wait completion.
+ * @completion: completion of any of the CS in the list
+ * @lock: spinlock for the completion structure
+ * @timestamp: timestamp for the multi-CS completion
+ * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS
+ * is waiting
+ * @used: 1 if in use, otherwise 0
+ */
+struct multi_cs_completion {
+ struct completion completion;
+ spinlock_t lock;
+ s64 timestamp;
+ u32 stream_master_qid_map;
+ u8 used;
+};
+
+/**
+ * struct multi_cs_data - internal data for multi CS call
+ * @ctx: pointer to the context structure
+ * @fence_arr: array of fences of all CSs
+ * @seq_arr: array of CS sequence numbers
+ * @timeout_us: timeout in usec for waiting for CS to complete
+ * @timestamp: timestamp of first completed CS
+ * @wait_status: wait for CS status
+ * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
+ * @stream_master_qid_map: bitmap of all stream master QIDs on which the
+ * multi-CS is waiting
+ * @arr_len: fence_arr and seq_arr array length
+ * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
+ * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
+ */
+struct multi_cs_data {
+ struct hl_ctx *ctx;
+ struct hl_fence **fence_arr;
+ u64 *seq_arr;
+ s64 timeout_us;
+ s64 timestamp;
+ long wait_status;
+ u32 completion_bitmap;
+ u32 stream_master_qid_map;
+ u8 arr_len;
+ u8 gone_cs;
+ u8 update_ts;
+};
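From the two structure docs above, the waiter/completer pairing reduces to a bitmap intersection: a completed CS is relevant to a multi-CS waiter only if their stream-master QID maps overlap. That reading is an assumption drawn from the field descriptions, not code from this patch; a self-contained check of the intended predicate:

#include <assert.h>
#include <stdint.h>

/* a CS wakes a waiter iff they share at least one stream-master QID */
static int cs_wakes_waiter(uint32_t cs_qid_map, uint32_t waiter_qid_map)
{
	return (cs_qid_map & waiter_qid_map) != 0;
}

int main(void)
{
	assert(cs_wakes_waiter(0x5, 0x4));	/* QID 2 is common   */
	assert(!cs_wakes_waiter(0x5, 0x2));	/* disjoint masters  */
	return 0;
}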
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -2129,6 +2380,8 @@ struct hl_mmu_funcs {
* @mmu_func: device-related MMU functions.
* @fw_loader: FW loader manager.
* @pci_mem_region: array of memory regions in the PCI
+ * @state_dump_specs: constants and dictionaries needed to dump system state.
+ * @multi_cs_completion: array of multi-CS completion.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2205,6 +2458,7 @@ struct hl_mmu_funcs {
* halted. We can't halt it again because the COMMS
* protocol will throw an error. Relevant only for
* cases where Linux was not loaded to device CPU
+ * @supports_wait_for_multi_cs: true if wait for multi CS is supported
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2273,6 +2527,11 @@ struct hl_device {
struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
+ struct hl_state_dump_specs state_dump_specs;
+
+ struct multi_cs_completion multi_cs_completion[
+ MULTI_CS_MAX_USER_CTX];
+ u32 *stream_master_qid_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -2322,6 +2581,8 @@ struct hl_device {
u8 curr_reset_cause;
u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
+ u8 supports_wait_for_multi_cs;
+ u8 stream_master_qid_arr_size;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2343,6 +2604,29 @@ struct hl_device {
};
+/**
+ * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
+ * @refcount: refcount used to protect removing this id when several
+ * wait cs are used to wait on the reserved encaps signals.
+ * @hdev: pointer to habanalabs device structure.
+ * @hw_sob: pointer to H/W SOB used in the reservation.
+ * @cs_seq: staged cs sequence which contains encapsulated signals
+ * @id: idr handler id to be used to fetch the handler info
+ * @q_idx: stream queue index
+ * @pre_sob_val: current SOB value before reservation
+ * @count: signals number
+ */
+struct hl_cs_encaps_sig_handle {
+ struct kref refcount;
+ struct hl_device *hdev;
+ struct hl_hw_sob *hw_sob;
+ u64 cs_seq;
+ u32 id;
+ u32 q_idx;
+ u32 pre_sob_val;
+ u32 count;
+};
+
/*
* IOCTLs
*/
@@ -2373,6 +2657,23 @@ struct hl_ioctl_desc {
*/
/**
+ * hl_get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
+ *
+ * Calculate the number of consecutive pages described by the SG list. Take the
+ * offset of the address in the first page, add to it the length and round it up
+ * to the number of needed pages.
+ */
+static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+ *dma_addr = sg_dma_address(sg);
+
+ return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
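The round-up in hl_get_sg_info() is easy to sanity-check in isolation. A self-contained rewrite of the same arithmetic with 4 KiB pages, where PG_SIZE and PG_SHIFT are local stand-ins for the kernel's PAGE_SIZE and PAGE_SHIFT:

#include <assert.h>
#include <stdint.h>

#define PG_SIZE  4096u
#define PG_SHIFT 12

static uint32_t npages(uint64_t dma_addr, uint32_t len)
{
	return (uint32_t)((((dma_addr & (PG_SIZE - 1)) + len) +
			   (PG_SIZE - 1)) >> PG_SHIFT);
}

int main(void)
{
	assert(npages(0x1000, 0x1000) == 1);	/* aligned, exactly one page */
	assert(npages(0x1f00, 0x0300) == 2);	/* 0x200 bytes spill over    */
	assert(npages(0x1f00, 0x0100) == 1);	/* fits in the first page    */
	return 0;
}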
+
+/**
* hl_mem_area_inside_range() - Checks whether address+size are inside a range.
* @address: The start address of the area we want to validate.
* @size: The size in bytes of the area we want to validate.
@@ -2436,7 +2737,9 @@ void destroy_hdev(struct hl_device *hdev);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
- u32 cb_size, u64 cb_ptr);
+ u32 cb_size, u64 cb_ptr);
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr);
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
@@ -2470,6 +2773,8 @@ void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
@@ -2511,18 +2816,19 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
-void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
+void hl_fences_put(struct hl_fence **fence, int len);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
bool cs_needs_completion(struct hl_cs *cs);
bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
+void hl_multi_cs_completion_init(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -2650,9 +2956,25 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+void hw_sob_get(struct hl_hw_sob *hw_sob);
+void hw_sob_put(struct hl_hw_sob *hw_sob);
+void hl_encaps_handle_do_release(struct kref *ref);
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count);
+ struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
+
+int hl_state_dump(struct hl_device *hdev);
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
+__printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...);
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
#ifdef CONFIG_DEBUG_FS
@@ -2673,6 +2995,8 @@ void hl_debugfs_remove_userptr(struct hl_device *hdev,
struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length);
#else
@@ -2746,6 +3070,11 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
+ char *data, unsigned long length)
+{
+}
+
#endif
/* IOCTLs */
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 4194cda2d04c..a75e4fceb9d8 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -141,7 +141,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_cb_mgr_init(&hpriv->cb_mgr);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
- hpriv->taskpid = find_get_pid(current->pid);
+ hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_list_lock);
@@ -194,7 +194,6 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
-
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
@@ -318,12 +317,16 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->asic_prop.fw_security_enabled = false;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
- "disabled", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
+ "operational", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
"in reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
+ "disabled", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
"needs reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
+ "in device creation", HL_STR_MAX);
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
@@ -532,7 +535,7 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
result = PCI_ERS_RESULT_NONE;
}
- hdev->asic_funcs->halt_engines(hdev, true);
+ hdev->asic_funcs->halt_engines(hdev, true, false);
return result;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index f4dda7b4acdd..86c3257d9ae1 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -94,6 +94,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.first_available_interrupt_id =
prop->first_available_user_msix_interrupt;
+ hw_ip.server_type = prop->server_type;
+
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index bcabfdbf1e01..76b7de8f1406 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -65,7 +65,7 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
}
/*
- * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
* H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
@@ -80,8 +80,8 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
- struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -222,8 +222,8 @@ static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
* @cb_size: size of CB
* @cb_ptr: pointer to CB location
*
- * This function sends a single CB, that must NOT generate a completion entry
- *
+ * This function sends a single CB, which must NOT generate a completion entry.
+ * Sending CPU messages can be done instead via 'hl_hw_queue_submit_bd()'
*/
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
@@ -231,16 +231,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
int rc = 0;
- /*
- * The CPU queue is a synchronous queue with an effective depth of
- * a single entry (although it is allocated with room for multiple
- * entries). Therefore, there is a different lock, called
- * send_cpu_message_lock, that serializes accesses to the CPU queue.
- * As a result, we don't need to lock the access to the entire H/W
- * queues module when submitting a JOB to the CPU queue
- */
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_lock(hdev);
if (hdev->disabled) {
rc = -EPERM;
@@ -258,11 +249,10 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_unlock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
@@ -328,7 +318,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
submit_bd:
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
@@ -407,7 +397,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
static int init_signal_cs(struct hl_device *hdev,
@@ -426,8 +416,9 @@ static int init_signal_cs(struct hl_device *hdev,
cs_cmpl->sob_val = prop->next_sob_val;
dev_dbg(hdev->dev,
- "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
- cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ "generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
+ cs_cmpl->cs_seq);
	/* we set an EB since we must make sure all operations are done
* when sending the signal
@@ -435,17 +426,37 @@ static int init_signal_cs(struct hl_device *hdev,
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
cs_cmpl->hw_sob->sob_id, 0, true);
- rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1);
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
+ false);
return rc;
}
-static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl)
+{
+ struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+
+ cs_cmpl->hw_sob = handle->hw_sob;
+
+	/* Note that encaps_sig_wait_offset was validated earlier in the
+	 * flow against the max reserved signal count.
+	 * Always decrement the offset by 1: when the user sets offset 1
+	 * they mean to wait only for the first signal, which is
+	 * pre_sob_val; offset 2 requires (pre_sob_val + 1), and so on.
+ */
+ cs_cmpl->sob_val = handle->pre_sob_val +
+ (job->encaps_sig_wait_offset - 1);
+}
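A worked instance of the offset math above, assuming a hypothetical reservation that started at pre_sob_val == 100: offset 1 waits on SOB value 100 (the first reserved signal), offset 4 on 103.

#include <assert.h>
#include <stdint.h>

/* offset is 1-based from the user's point of view */
static uint32_t wait_sob_val(uint32_t pre_sob_val, uint32_t offset)
{
	return pre_sob_val + (offset - 1);
}

int main(void)
{
	assert(wait_sob_val(100, 1) == 100);	/* first reserved signal */
	assert(wait_sob_val(100, 4) == 103);	/* last of the four      */
	return 0;
}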
+
+static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
- struct hl_cs_compl *signal_cs_cmpl;
- struct hl_sync_stream_properties *prop;
struct hl_gen_wait_properties wait_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_cs_compl *signal_cs_cmpl;
u32 q_idx;
q_idx = job->hw_queue_id;
@@ -455,14 +466,51 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_compl,
base_fence);
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ if (cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);
+
+ dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
+ cs->encaps_sig_hdl->q_idx,
+ cs->encaps_sig_hdl->cs_seq,
+ cs_cmpl->sob_val,
+ job->encaps_sig_wait_offset);
+ } else {
+ /* Copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+	/* Check again if the signal CS has already completed. If so, don't
+	 * send any wait CS, since the hw_sob could already be in reset.
+	 * If the signal is not completed, take a refcount on the hw_sob to
+	 * prevent resetting it while the wait CS is not yet submitted.
+	 * Note that this check is protected by two locks: the hw-queue
+	 * lock and the completion-object lock. The completion-object lock
+	 * also protects the hw_sob reset handler, while the hw-queue lock
+	 * keeps the hw_sob refcount, changed by the signal/wait flows,
+	 * from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
dev_dbg(hdev->dev,
- "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
- prop->base_mon_id, q_idx);
+ prop->base_mon_id, q_idx, cs->sequence);
wait_prop.data = (void *) job->patched_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -471,17 +519,14 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
wait_prop.mon_id = prop->base_mon_id;
wait_prop.q_idx = q_idx;
wait_prop.size = 0;
+
hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
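The check-then-pin sequence above is worth isolating: under the signal completion's lock, bail out if the signal fence already completed (the SOB may be mid-reset), otherwise take a reference so the reset handler cannot recycle the SOB before the wait CS is submitted. A minimal sketch with illustrative names, not driver API:

static int example_pin_sob_if_pending(spinlock_t *lock,
				      struct completion *signal_done,
				      struct kref *sob_ref)
{
	int rc = 0;

	spin_lock(lock);
	if (completion_done(signal_done))
		/* Signal already completed - SOB may be in reset, bail */
		rc = -EINVAL;
	else
		/* Pin the SOB so the reset handler can't recycle it */
		kref_get(sob_ref);
	spin_unlock(lock);

	return rc;
}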
/*
@@ -506,7 +551,60 @@ static int init_signal_wait_cs(struct hl_cs *cs)
if (cs->type & CS_TYPE_SIGNAL)
rc = init_signal_cs(hdev, job, cs_cmpl);
else if (cs->type & CS_TYPE_WAIT)
- init_wait_cs(hdev, cs, job, cs_cmpl);
+ rc = init_wait_cs(hdev, cs, job, cs_cmpl);
+
+ return rc;
+}
+
+static int encaps_sig_first_staged_cs_handler(struct hl_device *hdev,
+						struct hl_cs *cs)
+{
+	struct hl_cs_compl *cs_cmpl =
+		container_of(cs->fence, struct hl_cs_compl, base_fence);
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_encaps_signals_mgr *mgr;
+ int rc = 0;
+
+ mgr = &hdev->compute_ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
+ if (encaps_sig_hdl) {
+ /*
+ * Set handler CS sequence,
+ * the CS which contains the encapsulated signals.
+ */
+ encaps_sig_hdl->cs_seq = cs->sequence;
+		/* Store the handle and set the encaps signal indication, to
+		 * be used later in cs_do_release to put the last reference
+		 * to the encaps signals handler.
+		 */
+ cs_cmpl->encaps_signals = true;
+ cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;
+
+		/* Set the hw_sob pointer in the completion object, since it's
+		 * used in the cs_do_release flow to put the SOB refcount
+		 */
+ cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
+ cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
+ encaps_sig_hdl->count;
+
+ dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
+ cs->sequence, encaps_sig_hdl->id,
+ encaps_sig_hdl->count,
+ encaps_sig_hdl->q_idx,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+
+ } else {
+ dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
+ cs->encaps_sig_hdl_id);
+ rc = -EINVAL;
+ }
+
+ spin_unlock(&mgr->lock);
return rc;
}
@@ -581,14 +679,21 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
rc = init_signal_wait_cs(cs);
- if (rc) {
- dev_err(hdev->dev, "Failed to submit signal cs\n");
+ if (rc)
goto unroll_cq_resv;
- }
- } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->collective_wait_init_cs(cs);
+ } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
+ rc = hdev->asic_funcs->collective_wait_init_cs(cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+ if (cs->encaps_signals && cs->staged_first) {
+ rc = encaps_sig_first_staged_cs_handler(hdev, cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+
spin_lock(&hdev->cs_mirror_lock);
/* Verify staged CS exists and add to the staged list */
@@ -613,6 +718,11 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
+
+ /* update stream map of the first CS */
+ if (hdev->supports_wait_for_multi_cs)
+ staged_cs->fence->stream_master_qid_map |=
+ cs->fence->stream_master_qid_map;
}
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
@@ -834,6 +944,8 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
hw_sob = &sync_stream_prop->hw_sob[sob];
hw_sob->hdev = hdev;
hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
+ hw_sob->sob_addr =
+ hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
hw_sob->q_idx = q_idx;
kref_init(&hw_sob->kref);
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index af339ce1ab4f..33986933aa9e 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -124,7 +124,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_lock(&vm->idr_lock);
handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
- GFP_KERNEL);
+ GFP_ATOMIC);
spin_unlock(&vm->idr_lock);
if (handle < 0) {
@@ -529,6 +529,33 @@ static inline int add_va_block(struct hl_device *hdev,
}
/**
+ * is_hint_crossing_range() - check if a hint address crosses the specified
+ * reserved range.
+ * @range_type: virtual address range type (DRAM/host/host-huge).
+ * @start_addr: start address of the area to check.
+ * @size: size of the area to check.
+ * @prop: pointer to the ASIC properties holding the reserved hint ranges.
+ */
+static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
+		u64 start_addr, u32 size, struct asic_fixed_properties *prop)
+{
+ bool range_cross;
+
+ if (range_type == HL_VA_RANGE_TYPE_DRAM)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_dram_reserved_va_range.start_addr,
+ prop->hints_dram_reserved_va_range.end_addr);
+ else if (range_type == HL_VA_RANGE_TYPE_HOST)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_reserved_va_range.start_addr,
+ prop->hints_host_reserved_va_range.end_addr);
+ else
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_hpage_reserved_va_range.start_addr,
+ prop->hints_host_hpage_reserved_va_range.end_addr);
+
+ return range_cross;
+}
+
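The helper above delegates the actual overlap test to hl_mem_area_crosses_range(). Its presumed semantics, sketched under the assumption that both the area and the reserved range are treated as closed intervals (an illustration, not the driver's implementation):

static inline bool example_area_crosses_range(u64 addr, u32 size,
					      u64 range_start, u64 range_end)
{
	u64 area_end = addr + size - 1;

	/* Two intervals overlap iff each one starts before the other ends */
	return addr <= range_end && area_end >= range_start;
}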
+/**
* get_va_block() - get a virtual block for the given size and alignment.
*
* @hdev: pointer to the habanalabs device structure.
@@ -536,6 +563,8 @@ static inline int add_va_block(struct hl_device *hdev,
* @size: requested block size.
* @hint_addr: hint for requested address by the user.
* @va_block_align: required alignment of the virtual block start address.
+ * @range_type: VA range type (host, host-huge, dram)
+ * @flags: additional memory flags; currently only HL_MEM_FORCE_HINT is used
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
@@ -545,13 +574,19 @@ static inline int add_va_block(struct hl_device *hdev,
*/
static u64 get_va_block(struct hl_device *hdev,
struct hl_va_range *va_range,
- u64 size, u64 hint_addr, u32 va_block_align)
+ u64 size, u64 hint_addr, u32 va_block_align,
+ enum hl_va_range_type range_type,
+ u32 flags)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
- align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
+ align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
+ dram_hint_mask = prop->dram_hints_align_mask;
bool add_prev = false;
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
+ bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
+ bool force_hint = flags & HL_MEM_FORCE_HINT;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
@@ -564,12 +599,20 @@ static u64 get_va_block(struct hl_device *hdev,
size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
va_range->page_size;
- tmp_hint_addr = hint_addr;
+ tmp_hint_addr = hint_addr & ~dram_hint_mask;
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
- (!is_align_pow_2 &&
- do_div(tmp_hint_addr, va_range->page_size))) {
+ (!is_align_pow_2 && is_hint_dram_addr &&
+ do_div(tmp_hint_addr, va_range->page_size))) {
+
+ if (force_hint) {
+ /* Hint must be respected, so here we just fail */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx is not page aligned - cannot be respected\n",
+ hint_addr);
+ return 0;
+ }
dev_dbg(hdev->dev,
"Hint address 0x%llx will be ignored because it is not aligned\n",
@@ -596,6 +639,16 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size < size)
continue;
+		/*
+		 * In case the hint address is 0 and the
+		 * hints_range_reservation property is enabled, avoid
+		 * allocating VA blocks from the range reserved for hint
+		 * addresses
+		 */
+ if (prop->hints_range_reservation && !hint_addr)
+ if (is_hint_crossing_range(range_type, valid_start,
+ size, prop))
+ continue;
+
/* Pick the minimal length block which has the required size */
if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
@@ -618,6 +671,17 @@ static u64 get_va_block(struct hl_device *hdev,
goto out;
}
+ if (force_hint && reserved_valid_start != hint_addr) {
+ /* Hint address must be respected. If we are here - this means
+ * we could not respect it.
+ */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx could not be respected\n",
+ hint_addr);
+ reserved_valid_start = 0;
+ goto out;
+ }
+
/*
* Check if there is some leftover range due to reserving the new
* va block, then return it to the main virtual addresses list.
@@ -670,7 +734,8 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_va_range_type type, u32 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
- max(alignment, ctx->va_range[type]->page_size));
+ max(alignment, ctx->va_range[type]->page_size),
+ type, 0);
}
/**
@@ -732,28 +797,15 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
}
/**
- * get_sg_info() - get number of pages and the DMA address from SG list.
- * @sg: the SG list.
- * @dma_addr: pointer to DMA address to return.
- *
- * Calculate the number of consecutive pages described by the SG list. Take the
- * offset of the address in the first page, add to it the length and round it up
- * to the number of needed pages.
- */
-static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
-{
- *dma_addr = sg_dma_address(sg);
-
- return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-}
-
-/**
* init_phys_pg_pack_from_userptr() - initialize physical page pack from host
* memory
* @ctx: pointer to the context structure.
* @userptr: userptr to initialize from.
* @pphys_pg_pack: result pointer.
+ * @force_regular_page: tell the function to ignore huge page optimization,
+ * even if possible. Needed for cases where the device VA
+ * is allocated before we know the composition of the
+ * physical pages
*
* This function does the following:
* - Pin the physical pages related to the given virtual block.
@@ -762,17 +814,18 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_vm_phys_pg_pack **pphys_pg_pack,
+ bool force_regular_page)
{
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ bool first = true, is_huge_page_opt;
+ u64 page_mask, total_npages;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE,
- huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
- bool first = true, is_huge_page_opt = true;
int rc, i, j;
- u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -783,6 +836,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->asid = ctx->asid;
atomic_set(&phys_pg_pack->mapping_cnt, 1);
+	is_huge_page_opt = !force_regular_page;
+
/* Only if all dma_addrs are aligned to 2MB and their
* sizes are at least 2MB, we can use huge page mapping.
* We limit the 2MB optimization to this condition,
@@ -791,7 +846,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
*/
total_npages = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
@@ -820,7 +875,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
j = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
if (first) {
@@ -1001,11 +1056,12 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
u64 ret_vaddr, hint_addr;
u32 handle = 0, va_block_align;
int rc;
bool is_userptr = args->flags & HL_MEM_USERPTR;
+ enum hl_va_range_type va_range_type = 0;
/* Assume failure */
*device_addr = 0;
@@ -1023,7 +1079,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
}
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack, false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1031,14 +1087,14 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto init_page_pack_err;
}
- vm_type = (enum vm_type_t *) userptr;
+ vm_type = (enum vm_type *) userptr;
hint_addr = args->map_host.hint_addr;
handle = phys_pg_pack->handle;
/* get required alignment */
if (phys_pg_pack->page_size == page_size) {
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
-
+ va_range_type = HL_VA_RANGE_TYPE_HOST;
/*
* huge page alignment may be needed in case of regular
* page mapping, depending on the host VA alignment
@@ -1053,6 +1109,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* mapping
*/
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
+ va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
va_block_align = huge_page_size;
}
} else {
@@ -1072,12 +1129,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_unlock(&vm->idr_lock);
- vm_type = (enum vm_type_t *) phys_pg_pack;
+ vm_type = (enum vm_type *) phys_pg_pack;
hint_addr = args->map_device.hint_addr;
/* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
+ va_range_type = HL_VA_RANGE_TYPE_DRAM;
va_block_align = hdev->asic_prop.dmmu.page_size;
}
@@ -1100,8 +1158,23 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
+ if (hint_addr && phys_pg_pack->offset) {
+ if (args->flags & HL_MEM_FORCE_HINT) {
+ /* Fail if hint must be respected but it can't be */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ rc = -EINVAL;
+ goto va_block_err;
+ }
+ dev_dbg(hdev->dev,
+ "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ }
+
ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
- hint_addr, va_block_align);
+ hint_addr, va_block_align,
+ va_range_type, args->flags);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
@@ -1181,17 +1254,19 @@ init_page_pack_err:
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
bool ctx_free)
{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ u64 vaddr = args->unmap.device_virt_addr;
struct hl_vm_hash_node *hnode = NULL;
+ struct asic_fixed_properties *prop;
+ struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
- u64 vaddr = args->unmap.device_virt_addr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool is_userptr;
int rc = 0;
+ prop = &hdev->asic_prop;
+
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
@@ -1214,8 +1289,9 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
userptr = hnode->ptr;
- rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
+ false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1299,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
kfree(hnode);
if (is_userptr) {
- rc = free_phys_pg_pack(hdev, phys_pg_pack);
+ free_phys_pg_pack(hdev, phys_pg_pack);
dma_unmap_host_va(hdev, userptr);
}
@@ -1669,6 +1745,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ userptr->pid = current->pid;
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
if (!userptr->sgt)
return -ENOMEM;
@@ -2033,7 +2110,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* another side effect error
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev,
+ dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index c5e93ff32586..0f536f79dd9c 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -470,13 +470,13 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
- }
- /* Make sure that if we arrive here again without init was called we
- * won't cause kernel panic. This can happen for example if we fail
- * during hard reset code at certain points
- */
- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+		/* Make sure that if we arrive here again without init having
+		 * been called, we won't cause a kernel panic. This can
+		 * happen, for example, if we fail at certain points during
+		 * the hard reset flow
+		 */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+ }
}
/**
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index d5bedf5ba011..0b5366cc84fd 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -436,6 +436,8 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
+ dma_set_max_seg_size(&pdev->dev, U32_MAX);
+
return 0;
unmap_pci_bars:
diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/misc/habanalabs/common/state_dump.c
new file mode 100644
index 000000000000..74726907c95e
--- /dev/null
+++ b/drivers/misc/habanalabs/common/state_dump.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+/**
+ * hl_format_as_binary - helper function, format an integer as binary
+ * using supplied scratch buffer
+ * @buf: the buffer to use
+ * @buf_len: buffer capacity
+ * @n: number to format
+ *
+ * Returns pointer to buffer
+ */
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n)
+{
+ int i;
+ u32 bit;
+ bool leading0 = true;
+ char *wrptr = buf;
+
+	if (buf_len < 3) {
+		if (buf_len)
+			*wrptr = '\0';
+		return buf;
+	}
+
+ wrptr[0] = '0';
+ wrptr[1] = 'b';
+ wrptr += 2;
+ /* Remove 3 characters from length for '0b' and '\0' termination */
+ buf_len -= 3;
+
+ for (i = 0; i < sizeof(n) * BITS_PER_BYTE && buf_len; ++i, n <<= 1) {
+ /* Writing bit calculation in one line would cause a false
+ * positive static code analysis error, so splitting.
+ */
+ bit = n & (1 << (sizeof(n) * BITS_PER_BYTE - 1));
+ bit = !!bit;
+ leading0 &= !bit;
+		if (!leading0) {
+			*wrptr = '0' + bit;
+			++wrptr;
+			--buf_len;
+		}
+ }
+
+ *wrptr = '\0';
+
+ return buf;
+}
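A small usage sketch for the formatter above; the 35-byte scratch size ("0b" + 32 bits + NUL, cf. the BIN_REG_STRING_SIZE macro added for gaudi later in this patch) and the caller name are made up:

static void example_dump_reg(u32 reg_val)
{
	char scratch[35];

	pr_info("reg = %s\n",
		hl_format_as_binary(scratch, sizeof(scratch), reg_val));
}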
+
+/**
+ * resize_to_fit - helper function, resize buffer to fit given amount of data
+ * @buf: destination buffer double pointer
+ * @size: pointer to the size container
+ * @desired_size: size the buffer must contain
+ *
+ * Returns 0 if the buffer already fits, 1 if it was resized, or a negative
+ * error code on failure.
+ * On success, the size of the buffer is at least desired_size. The buffer is
+ * allocated via vmalloc and must be freed with vfree.
+ */
+static int resize_to_fit(char **buf, size_t *size, size_t desired_size)
+{
+ char *resized_buf;
+ size_t new_size;
+
+ if (*size >= desired_size)
+ return 0;
+
+ /* Not enough space to print all, have to resize */
+ new_size = max_t(size_t, PAGE_SIZE, round_up(desired_size, PAGE_SIZE));
+ resized_buf = vmalloc(new_size);
+ if (!resized_buf)
+ return -ENOMEM;
+ memcpy(resized_buf, *buf, *size);
+ vfree(*buf);
+ *buf = resized_buf;
+ *size = new_size;
+
+ return 1;
+}
+
+/**
+ * hl_snprintf_resize() - print formatted data to buffer, resize as needed
+ * @buf: buffer double pointer, to be written to and resized, must be either
+ * NULL or allocated with vmalloc.
+ * @size: current size of the buffer
+ * @offset: current offset to write to
+ * @format: format of the data
+ *
+ * This function will write formatted data into the buffer. If the buffer is
+ * not large enough, it will be resized using vmalloc. Size may be modified if
+ * the buffer was resized; offset will be advanced by the number of bytes
+ * written, not including the terminating character
+ *
+ * Returns 0 on success or error code on failure
+ *
+ * Note that the buffer has to be manually released using vfree.
+ */
+int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...)
+{
+ va_list args;
+ size_t length;
+ int rc;
+
+ if (*buf == NULL && (*size != 0 || *offset != 0))
+ return -EINVAL;
+
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format, args);
+ va_end(args);
+
+ rc = resize_to_fit(buf, size, *offset + length + 1);
+ if (rc < 0)
+ return rc;
+ else if (rc > 0) {
+ /* Resize was needed, write again */
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format,
+ args);
+ va_end(args);
+ }
+
+ *offset += length;
+
+ return 0;
+}
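A usage sketch of the contract documented above: start from a NULL buffer with zero size and offset, append repeatedly, and release with vfree() when done (the wrapper name is illustrative):

static int example_build_report(char **buf, size_t *size)
{
	size_t offset = 0;
	int rc;

	*buf = NULL;
	*size = 0;

	rc = hl_snprintf_resize(buf, size, &offset, "line %d\n", 1);
	if (!rc)
		rc = hl_snprintf_resize(buf, size, &offset, "line %d\n", 2);
	if (rc)
		vfree(*buf);	/* caller keeps the buffer only on success */

	return rc;
}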
+
+/**
+ * hl_sync_engine_to_string - convert engine type enum to string literal
+ * @engine_type: engine type (TPC/MME/DMA)
+ *
+ * Return the resolved string literal
+ */
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type)
+{
+ switch (engine_type) {
+ case ENGINE_DMA:
+ return "DMA";
+ case ENGINE_MME:
+ return "MME";
+ case ENGINE_TPC:
+ return "TPC";
+ }
+ return "Invalid Engine Type";
+}
+
+/**
+ * hl_print_resize_sync_engine - helper function, format engine name and ID
+ * using hl_snprintf_resize
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @engine_type: engine type (TPC/MME/DMA)
+ * @engine_id: engine numerical id
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_print_resize_sync_engine(char **buf, size_t *size, size_t *offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id)
+{
+ return hl_snprintf_resize(buf, size, offset, "%s%u",
+ hl_sync_engine_to_string(engine_type), engine_id);
+}
+
+/**
+ * hl_state_dump_get_sync_name - transform sync object id to name if available
+ * @hdev: pointer to the device
+ * @sync_id: sync object id
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered as a failure, as not all
+ * sync objects are named.
+ */
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->so_id_to_str_tb, entry,
+ node, sync_id)
+ if (sync_id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_get_monitor_name - transform monitor object dump to monitor
+ * name if available
+ * @hdev: pointer to the device
+ * @mon: monitor state dump
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered as a failure, as not all
+ * monitors are named.
+ */
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->monitor_id_to_str_tb,
+ entry, node, mon->id)
+ if (mon->id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_free_sync_to_engine_map - free sync object to engine map
+ * @map: sync object to engine map
+ *
+ * Note: generic free implementation, the allocation is implemented per ASIC.
+ */
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+ struct hlist_node *tmp_node;
+ int i;
+
+ hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
+ hash_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * hl_state_dump_get_sync_to_engine - transform sync_id to
+ * hl_sync_to_engine_map_entry if available for current id
+ * @map: sync object to engine map
+ * @sync_id: sync object id
+ *
+ * Returns the translation entry if found or NULL if not.
+ * Note, a returned NULL shall not be considered a failure, as the map is
+ * best effort and does not cover all possible sync ids.
+ */
+static struct hl_sync_to_engine_map_entry *
+hl_state_dump_get_sync_to_engine(struct hl_sync_to_engine_map *map, u32 sync_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ hash_for_each_possible(map->tb, entry, node, sync_id)
+ if (entry->sync_id == sync_id)
+ return entry;
+ return NULL;
+}
+
+/**
+ * hl_state_dump_read_sync_objects - read sync objects array
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns array of size SP_SYNC_OBJ_AMOUNT on success or NULL on failure
+ */
+static u32 *hl_state_dump_read_sync_objects(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 *sync_objects;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ base_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ sync_objects = vmalloc(sds->props[SP_SYNC_OBJ_AMOUNT] * sizeof(u32));
+ if (!sync_objects)
+ return NULL;
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i)
+ sync_objects[i] = RREG32(base_addr + i * sizeof(u32));
+
+ return sync_objects;
+}
+
+/**
+ * hl_state_dump_free_sync_objects - free sync objects array allocated by
+ * hl_state_dump_read_sync_objects
+ * @sync_objects: sync objects array
+ */
+static void hl_state_dump_free_sync_objects(u32 *sync_objects)
+{
+ vfree(sync_objects);
+}
+
+
+/**
+ * hl_state_dump_print_syncs_single_block - print active sync objects on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @map: sync engines names map
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int
+hl_state_dump_print_syncs_single_block(struct hl_device *hdev, u32 index,
+ char **buf, size_t *size, size_t *offset,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ const char *sync_name;
+ u32 *sync_objects = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ sync_objects = hl_state_dump_read_sync_objects(hdev, index);
+ if (!sync_objects) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i) {
+ struct hl_sync_to_engine_map_entry *entry;
+ u64 sync_object_addr;
+
+ if (!sync_objects[i])
+ continue;
+
+ sync_object_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index +
+ i * sizeof(u32);
+
+ rc = hl_snprintf_resize(buf, size, offset, "sync id: %u", i);
+ if (rc)
+ goto free_sync_objects;
+ sync_name = hl_state_dump_get_sync_name(hdev, i);
+ if (sync_name) {
+ rc = hl_snprintf_resize(buf, size, offset, " %s",
+ sync_name);
+ if (rc)
+ goto free_sync_objects;
+ }
+ rc = hl_snprintf_resize(buf, size, offset, ", value: %u",
+ sync_objects[i]);
+ if (rc)
+ goto free_sync_objects;
+
+ /* Append engine string */
+ entry = hl_state_dump_get_sync_to_engine(map,
+ (u32)sync_object_addr);
+ if (entry) {
+ rc = hl_snprintf_resize(buf, size, offset,
+ ", Engine: ");
+ if (rc)
+ goto free_sync_objects;
+ rc = hl_print_resize_sync_engine(buf, size, offset,
+ entry->engine_type,
+ entry->engine_id);
+ if (rc)
+ goto free_sync_objects;
+ }
+
+ rc = hl_snprintf_resize(buf, size, offset, "\n");
+ if (rc)
+ goto free_sync_objects;
+ }
+
+free_sync_objects:
+ hl_state_dump_free_sync_objects(sync_objects);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_syncs - print active sync objects
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_syncs(struct hl_device *hdev,
+ char **buf, size_t *size,
+					size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_sync_to_engine_map *map;
+ u32 index;
+ int rc = 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ rc = sds->funcs.gen_sync_to_engine_map(hdev, map);
+ if (rc)
+ goto free_map_mem;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Non zero sync objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ hl_state_dump_free_sync_to_engine_map(map);
+free_map_mem:
+ kfree(map);
+
+ return rc;
+}
+
+/**
+ * hl_state_dump_alloc_read_sm_block_monitors - read monitors for a specific
+ * block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns an array of monitor data of size SP_MONITORS_AMOUNT or NULL
+ * on error
+ */
+static struct hl_mon_state_dump *
+hl_state_dump_alloc_read_sm_block_monitors(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ monitors = vmalloc(sds->props[SP_MONITORS_AMOUNT] *
+ sizeof(struct hl_mon_state_dump));
+ if (!monitors)
+ return NULL;
+
+ base_addr = sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ monitors[i].id = i;
+ monitors[i].wr_addr_low =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_LOW] +
+ i * sizeof(u32));
+
+ monitors[i].wr_addr_high =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_HIGH] +
+ i * sizeof(u32));
+
+ monitors[i].wr_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].arm_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_ARM_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].status =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_STATUS] +
+ i * sizeof(u32));
+ }
+
+ return monitors;
+}
+
+/**
+ * hl_state_dump_free_monitors - free the monitors structure
+ * @monitors: monitors array created with
+ * hl_state_dump_alloc_read_sm_block_monitors
+ */
+static void hl_state_dump_free_monitors(struct hl_mon_state_dump *monitors)
+{
+ vfree(monitors);
+}
+
+/**
+ * hl_state_dump_print_monitors_single_block - print active monitors on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors_single_block(struct hl_device *hdev,
+ u32 index,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ monitors = hl_state_dump_alloc_read_sm_block_monitors(hdev, index);
+ if (!monitors) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ if (!(sds->funcs.monitor_valid(&monitors[i])))
+ continue;
+
+ /* Monitor is valid, dump it */
+ rc = sds->funcs.print_single_monitor(buf, size, offset, hdev,
+ &monitors[i]);
+ if (rc)
+ goto free_monitors;
+
+ hl_snprintf_resize(buf, size, offset, "\n");
+ }
+
+free_monitors:
+ hl_state_dump_free_monitors(monitors);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_monitors - print active monitors
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors(struct hl_device *hdev,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 index;
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset,
+ "Valid (armed) monitor objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_engine_fences - print active fences for a specific
+ * engine
+ * @hdev: pointer to the device
+ * @engine_type: engine type to use
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int
+hl_state_dump_print_engine_fences(struct hl_device *hdev,
+ enum hl_sync_engine_type engine_type,
+ char **buf, size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = 0, i, n_fences;
+ u64 base_addr, next_fence;
+
+ switch (engine_type) {
+ case ENGINE_TPC:
+ n_fences = sds->props[SP_NUM_OF_TPC_ENGINES];
+ base_addr = sds->props[SP_TPC0_CMDQ];
+ next_fence = sds->props[SP_NEXT_TPC];
+ break;
+ case ENGINE_MME:
+ n_fences = sds->props[SP_NUM_OF_MME_ENGINES];
+ base_addr = sds->props[SP_MME_CMDQ];
+ next_fence = sds->props[SP_NEXT_MME];
+ break;
+ case ENGINE_DMA:
+ n_fences = sds->props[SP_NUM_OF_DMA_ENGINES];
+ base_addr = sds->props[SP_DMA_CMDQ];
+ next_fence = sds->props[SP_DMA_QUEUES_OFFSET];
+ break;
+ default:
+ return -EINVAL;
+ }
+ for (i = 0; i < n_fences; ++i) {
+ rc = sds->funcs.print_fences_single_engine(
+ hdev,
+ base_addr + next_fence * i +
+ sds->props[SP_FENCE0_CNT_OFFSET],
+ base_addr + next_fence * i +
+ sds->props[SP_CP_STS_OFFSET],
+ engine_type, i, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
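The loop above walks a strided register layout: engine i's fence counter lives at base_addr + next_fence * i + SP_FENCE0_CNT_OFFSET. With made-up numbers, base 0x1000, stride 0x200 and counter offset 0x48, engine 2 resolves to 0x1000 + 0x200 * 2 + 0x48 = 0x1448:

static u64 example_fence_cnt_addr(u64 base_addr, u64 stride,
				  u64 cnt_offset, u32 engine_idx)
{
	/* Per-engine register block, then the fence counter within it */
	return base_addr + stride * engine_idx + cnt_offset;
}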
+
+/**
+ * hl_state_dump_print_fences - print active fences
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int hl_state_dump_print_fences(struct hl_device *hdev, char **buf,
+ size_t *size, size_t *offset)
+{
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Valid (armed) fences:\n");
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_TPC, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_MME, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_DMA, buf, size, offset);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump() - dump system state
+ * @hdev: pointer to device structure
+ */
+int hl_state_dump(struct hl_device *hdev)
+{
+ char *buf = NULL;
+ size_t offset = 0, size = 0;
+ int rc;
+
+ rc = hl_snprintf_resize(&buf, &size, &offset,
+ "Timestamp taken on: %llu\n\n",
+ ktime_to_ns(ktime_get()));
+ if (rc)
+ goto err;
+
+ rc = hl_state_dump_print_syncs(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_monitors(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_fences(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ hl_debugfs_set_state_dump(hdev, buf, size);
+
+ return 0;
+err:
+ vfree(buf);
+ return rc;
+}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index db72df282ef8..34f9f2779962 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,8 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
- bool curr)
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -44,8 +43,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
- u64 freq)
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -285,16 +283,12 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- char *str;
+ char str[HL_STR_MAX];
- if (atomic_read(&hdev->in_reset))
- str = "In reset";
- else if (hdev->disabled)
- str = "Malfunction";
- else if (hdev->needs_reset)
- str = "Needs Reset";
- else
- str = "Operational";
+ strscpy(str, hdev->status[hl_device_status(hdev)], HL_STR_MAX);
+
+ /* use uppercase for backward compatibility */
+ str[0] = 'A' + (str[0] - 'a');
return sprintf(buf, "%s\n", str);
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index aa8a0ca5aca2..383865be3c2c 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -76,7 +76,7 @@
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
-#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
@@ -106,6 +106,21 @@
#define GAUDI_PLL_MAX 10
+#define BIN_REG_STRING_SIZE sizeof("0b10101010101010101010101010101010")
+
+#define MONITOR_SOB_STRING_SIZE 256
+
+static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
+ GAUDI_QUEUE_ID_DMA_0_0,
+ GAUDI_QUEUE_ID_DMA_0_1,
+ GAUDI_QUEUE_ID_DMA_0_2,
+ GAUDI_QUEUE_ID_DMA_0_3,
+ GAUDI_QUEUE_ID_DMA_1_0,
+ GAUDI_QUEUE_ID_DMA_1_1,
+ GAUDI_QUEUE_ID_DMA_1_2,
+ GAUDI_QUEUE_ID_DMA_1_3
+};
+
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -348,6 +363,97 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
+static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
+ { .id = 0, .name = "SYNC_OBJ_DMA_DOWN_FEEDBACK" },
+ { .id = 1, .name = "SYNC_OBJ_DMA_UP_FEEDBACK" },
+ { .id = 2, .name = "SYNC_OBJ_DMA_STATIC_DRAM_SRAM_FEEDBACK" },
+ { .id = 3, .name = "SYNC_OBJ_DMA_SRAM_DRAM_FEEDBACK" },
+ { .id = 4, .name = "SYNC_OBJ_FIRST_COMPUTE_FINISH" },
+ { .id = 5, .name = "SYNC_OBJ_HOST_DRAM_DONE" },
+ { .id = 6, .name = "SYNC_OBJ_DBG_CTR_DEPRECATED" },
+ { .id = 7, .name = "SYNC_OBJ_DMA_ACTIVATIONS_DRAM_SRAM_FEEDBACK" },
+ { .id = 8, .name = "SYNC_OBJ_ENGINE_SEM_MME_0" },
+ { .id = 9, .name = "SYNC_OBJ_ENGINE_SEM_MME_1" },
+ { .id = 10, .name = "SYNC_OBJ_ENGINE_SEM_TPC_0" },
+ { .id = 11, .name = "SYNC_OBJ_ENGINE_SEM_TPC_1" },
+ { .id = 12, .name = "SYNC_OBJ_ENGINE_SEM_TPC_2" },
+ { .id = 13, .name = "SYNC_OBJ_ENGINE_SEM_TPC_3" },
+ { .id = 14, .name = "SYNC_OBJ_ENGINE_SEM_TPC_4" },
+ { .id = 15, .name = "SYNC_OBJ_ENGINE_SEM_TPC_5" },
+ { .id = 16, .name = "SYNC_OBJ_ENGINE_SEM_TPC_6" },
+ { .id = 17, .name = "SYNC_OBJ_ENGINE_SEM_TPC_7" },
+ { .id = 18, .name = "SYNC_OBJ_ENGINE_SEM_DMA_1" },
+ { .id = 19, .name = "SYNC_OBJ_ENGINE_SEM_DMA_2" },
+ { .id = 20, .name = "SYNC_OBJ_ENGINE_SEM_DMA_3" },
+ { .id = 21, .name = "SYNC_OBJ_ENGINE_SEM_DMA_4" },
+ { .id = 22, .name = "SYNC_OBJ_ENGINE_SEM_DMA_5" },
+ { .id = 23, .name = "SYNC_OBJ_ENGINE_SEM_DMA_6" },
+ { .id = 24, .name = "SYNC_OBJ_ENGINE_SEM_DMA_7" },
+ { .id = 25, .name = "SYNC_OBJ_DBG_CTR_0" },
+ { .id = 26, .name = "SYNC_OBJ_DBG_CTR_1" },
+};
+
+static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
+ { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
+ { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
+ { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
+ { .id = 206, .name = "MON_OBJ_TPC_2_CLK_GATE" },
+ { .id = 207, .name = "MON_OBJ_TPC_3_CLK_GATE" },
+ { .id = 208, .name = "MON_OBJ_TPC_4_CLK_GATE" },
+ { .id = 209, .name = "MON_OBJ_TPC_5_CLK_GATE" },
+ { .id = 210, .name = "MON_OBJ_TPC_6_CLK_GATE" },
+ { .id = 211, .name = "MON_OBJ_TPC_7_CLK_GATE" },
+};
+
+static s64 gaudi_state_dump_specs_props[] = {
+ [SP_SYNC_OBJ_BASE_ADDR] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0,
+ [SP_NEXT_SYNC_OBJ_ADDR] = NEXT_SYNC_OBJ_ADDR_INTERVAL,
+ [SP_SYNC_OBJ_AMOUNT] = NUM_OF_SOB_IN_BLOCK,
+ [SP_MON_OBJ_WR_ADDR_LOW] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0,
+ [SP_MON_OBJ_WR_ADDR_HIGH] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0,
+ [SP_MON_OBJ_WR_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0,
+ [SP_MON_OBJ_ARM_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0,
+ [SP_MON_OBJ_STATUS] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0,
+ [SP_MONITORS_AMOUNT] = NUM_OF_MONITORS_IN_BLOCK,
+ [SP_TPC0_CMDQ] = mmTPC0_QM_GLBL_CFG0,
+ [SP_TPC0_CFG_SO] = mmTPC0_CFG_QM_SYNC_OBJECT_ADDR,
+ [SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
+ [SP_MME_CMDQ] = mmMME0_QM_GLBL_CFG0,
+ [SP_MME_CFG_SO] = mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL,
+ [SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
+ [SP_DMA_CMDQ] = mmDMA0_QM_GLBL_CFG0,
+ [SP_DMA_CFG_SO] = mmDMA0_CORE_WR_COMP_ADDR_LO,
+ [SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_OF_MME_ENGINES] = NUM_OF_MME_ENGINES,
+ [SP_SUB_MME_ENG_NUM] = NUM_OF_MME_SUB_ENGINES,
+ [SP_NUM_OF_DMA_ENGINES] = NUM_OF_DMA_ENGINES,
+ [SP_NUM_OF_TPC_ENGINES] = NUM_OF_TPC_ENGINES,
+ [SP_ENGINE_NUM_OF_QUEUES] = NUM_OF_QUEUES,
+ [SP_ENGINE_NUM_OF_STREAMS] = NUM_OF_STREAMS,
+ [SP_ENGINE_NUM_OF_FENCES] = NUM_OF_FENCES,
+ [SP_FENCE0_CNT_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_FENCE0_RDATA_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_CORES] = 1,
+};
+
+/* The order here is opposite to the order of the indexing in the h/w.
+ * i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
+ */
+static const char * const gaudi_sync_manager_names[] = {
+ "SYNC_MGR_E_N",
+ "SYNC_MGR_W_N",
+ "SYNC_MGR_E_S",
+ "SYNC_MGR_W_S",
+ NULL
+};
+
struct ecc_info_extract_params {
u64 block_address;
u32 num_memories;
@@ -363,8 +469,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val);
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -375,7 +479,6 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
-
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
@@ -403,7 +506,11 @@ static inline void set_default_power_values(struct hl_device *hdev)
if (hdev->card_type == cpucp_card_type_pmc) {
prop->max_power_default = MAX_POWER_DEFAULT_PMC;
- prop->dc_power_default = DC_POWER_DEFAULT_PMC;
+
+ if (prop->fw_security_enabled)
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
+ else
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC;
} else {
prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->dc_power_default = DC_POWER_DEFAULT_PCI;
@@ -450,6 +557,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
get_collective_mode(hdev, i);
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -551,6 +659,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -723,14 +833,14 @@ pci_init:
GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
return 0;
@@ -974,17 +1084,11 @@ static void gaudi_sob_group_hw_reset(struct kref *ref)
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- u64 base_addr;
- int rc;
+ int i;
- base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob_group->base_sob_id * 4;
- rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
- base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
- if (rc)
- dev_err(hdev->dev,
- "failed resetting sob group - sob base %u, count %u",
- hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
+ for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
+ WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (hw_sob_group->base_sob_id * 4) + (i * 4)), 0);
kref_init(&hw_sob_group->kref);
}
@@ -1121,6 +1225,20 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+ if (job->cs->encaps_signals) {
+		/* use the encaps signal handle stored earlier in the flow
+		 * and set the SOB information from the encaps
+		 * signals handle
+		 */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
+ cs_cmpl);
+
+ dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
+ job->cs->sequence,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+ }
+
/* Add to wait CBs using slave monitor */
wait_prop.data = (void *) job->user_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -1131,7 +1249,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
wait_prop.size = cb_size;
dev_dbg(hdev->dev,
- "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n",
+ "Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->collective_slave_mon_id, queue_id);
@@ -1145,7 +1263,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, cb_size, false);
}
-static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
+static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
struct hl_cs_compl *signal_cs_cmpl =
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
@@ -1163,9 +1281,37 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ /* In encaps signals case the SOB info will be retrieved from
+ * the handle in gaudi_collective_slave_init_job.
+ */
+ if (!cs->encaps_signals) {
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+	/* Check again whether the signal CS has already completed. If so,
+	 * don't send any wait CS, since the hw_sob could already be in reset.
+	 * If the signal has not completed, take a refcount on the hw_sob to
+	 * prevent the SOB from being reset while the wait CS is not yet
+	 * submitted.
+	 * Note that this check is protected by two locks, the hw queue lock
+	 * and the completion object lock; the same completion object lock
+	 * also protects the hw_sob reset handler. The hw queue lock keeps the
+	 * hw_sob refcount, which is changed by the signal/wait flows, from
+	 * going out of sync.
+	 */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+ /* Increment kref since all slave queues are now waiting on it */
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
/* Calculate the stream from collective master queue (1st job) */
job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
@@ -1210,21 +1356,17 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
cprop->curr_sob_group_idx[stream], stream);
}
- /* Increment kref since all slave queues are now waiting on it */
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
- enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id)
+ enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
+ u32 encaps_signal_offset)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_cs_counters_atomic *cntr;
@@ -1284,6 +1426,13 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = queue_id;
+	/* Since it's guaranteed to have only one chunk in the collective wait
+	 * CS, we can use this chunk to set the encapsulated signal offset
+	 * in the jobs.
+	 */
+ if (cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
+
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need
@@ -1312,8 +1461,9 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hw_queue_properties *hw_queue_prop;
@@ -1363,7 +1513,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
if (i == 0) {
queue_id = wait_queue_id;
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_MASTER, queue_id, wait_queue_id);
+ HL_COLLECTIVE_MASTER, queue_id,
+ wait_queue_id, encaps_signal_offset);
} else {
if (nic_idx < NIC_NUMBER_OF_ENGINES) {
if (gaudi->hw_cap_initialized &
@@ -1383,7 +1534,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
}
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id);
+ HL_COLLECTIVE_SLAVE, queue_id,
+ wait_queue_id, encaps_signal_offset);
}
if (rc)
@@ -1431,6 +1583,11 @@ static int gaudi_late_init(struct hl_device *hdev)
return rc;
}
+ /* Scrub both SRAM and DRAM */
+ rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+ if (rc)
+ goto disable_pci_access;
+
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
@@ -1455,6 +1612,11 @@ static int gaudi_late_init(struct hl_device *hdev)
goto disable_pci_access;
}
+ /* We only support a single ASID for the user, so for the sake of optimization, just
+ * initialize the ASID one time during device initialization with the fixed value of 1
+ */
+ gaudi_mmu_prepare(hdev, 1);
+
return 0;
disable_pci_access:
@@ -1720,8 +1882,12 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
+ hdev->supports_wait_for_multi_cs = true;
- gaudi_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
+ hdev->stream_master_qid_arr =
+ hdev->asic_funcs->get_stream_master_qid_arr();
+ hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;
return 0;
@@ -2523,7 +2689,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
/* Mask all arithmetic interrupts from TPC */
- WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFE);
/* Set 16 cache lines */
WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
ICACHE_FETCH_LINE_NUM, 2);
@@ -3670,7 +3836,7 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -3682,6 +3848,9 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
+ if (fw_reset)
+ goto skip_engines;
+
gaudi_stop_nic_qmans(hdev);
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
@@ -3707,6 +3876,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
+skip_engines:
gaudi_disable_msi(hdev);
}
@@ -3739,6 +3909,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+ /* mem cache invalidation */
+ WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
+
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
@@ -4071,7 +4244,7 @@ disable_queues:
return rc;
}
-static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -4092,6 +4265,14 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
+ if (fw_reset) {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+
+ goto skip_reset;
+ }
+
driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
!hdev->asic_prop.hard_reset_done_by_fw);
@@ -4168,6 +4349,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
reset_timeout_ms);
}
+skip_reset:
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
* itself is in reset. Need to wait until the reset is deasserted
@@ -4212,7 +4394,7 @@ static int gaudi_resume(struct hl_device *hdev)
return gaudi_init_iatu(hdev);
}
-static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4621,8 +4803,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -5796,78 +5978,6 @@ release_cb:
return rc;
}
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
-{
- struct hl_ctx *ctx;
- struct hl_pending_cb *pending_cb;
- struct packet_msg_long *pkt;
- u32 cb_size, ctl;
- struct hl_cb *cb;
- int i, rc;
-
- mutex_lock(&hdev->fpriv_list_lock);
- ctx = hdev->compute_ctx;
-
- /* If no compute context available or context is going down
- * memset registers directly
- */
- if (!ctx || kref_read(&ctx->refcount) == 0) {
- rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
- mutex_unlock(&hdev->fpriv_list_lock);
- return rc;
- }
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- cb_size = (sizeof(*pkt) * num_regs) +
- sizeof(struct packet_msg_prot) * 2;
-
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
- return -ENOMEM;
- }
-
- pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
- if (!pending_cb)
- return -ENOMEM;
-
- cb = hl_cb_kernel_create(hdev, cb_size, false);
- if (!cb) {
- kfree(pending_cb);
- return -EFAULT;
- }
-
- pkt = cb->kernel_address;
-
- ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
-
- for (i = 0; i < num_regs ; i++, pkt++) {
- pkt->ctl = cpu_to_le32(ctl);
- pkt->value = cpu_to_le32(val);
- pkt->addr = cpu_to_le64(reg_base + (i * 4));
- }
-
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
- pending_cb->cb = cb;
- pending_cb->cb_size = cb_size;
- /* The queue ID MUST be an external queue ID. Otherwise, we will
- * have undefined behavior
- */
- pending_cb->hw_queue_id = hw_queue_id;
-
- spin_lock(&ctx->pending_cb_lock);
- list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- return 0;
-}
-
static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
u64 base_addr;
@@ -6013,7 +6123,7 @@ static int gaudi_restore_user_registers(struct hl_device *hdev)
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
- return gaudi_restore_user_registers(hdev);
+ return 0;
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
@@ -6723,6 +6833,9 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6772,7 +6885,8 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
- WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
job->job_cb_size, cb->bus_address);
@@ -6793,8 +6907,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
}
free_fence_ptr:
- WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
- ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
@@ -7168,7 +7281,7 @@ static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream
cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
size = RREG32(cq_tsize);
- dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
stream, cq_ptr, size);
}
@@ -7224,7 +7337,7 @@ static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
addr = le64_to_cpu(bd->ptr);
- dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
stream, ci, addr, len);
/* get previous ci, wrap if needed */
@@ -7326,24 +7439,30 @@ static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
{
u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
+ /* Reverse the two-bit index, as the enum is ordered the opposite way */
+ index = (index ^ 0x3) & 0x3;
+
switch (sei_data->sei_cause) {
case SM_SEI_SO_OVERFLOW:
- dev_err(hdev->dev,
- "SM %u SEI Error: SO %u overflow/underflow",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: SOB Group %u overflow/underflow",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_LBW_4B_UNALIGNED:
- dev_err(hdev->dev,
- "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_AXI_RESPONSE_ERR:
- dev_err(hdev->dev,
- "SM %u SEI Error: AXI ID %u response error",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: AXI ID %u response error",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
default:
- dev_err(hdev->dev, "Unknown SM SEI cause %u",
+ dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u",
le32_to_cpu(sei_data->sei_log));
break;
}
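
The (index ^ 0x3) & 0x3 transform above reverses a two-bit index (0<->3, 1<->2) so the DMA_IF SEI event number lines up with the ordering of gaudi_sync_manager_names. A minimal standalone sketch of the mapping; the name strings are placeholders, not the driver's actual table:

#include <stdio.h>

int main(void)
{
	/* placeholder names; the real table is gaudi_sync_manager_names */
	const char *names[4] = { "SM name 0", "SM name 1", "SM name 2", "SM name 3" };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int flipped = (i ^ 0x3) & 0x3; /* 0->3, 1->2, 2->1, 3->0 */
		printf("event index %u -> table slot %u (%s)\n", i, flipped, names[flipped]);
	}
	return 0;
}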
@@ -7358,6 +7477,11 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool extract_info_from_fw;
int rc;
+ if (hdev->asic_prop.fw_security_enabled) {
+ extract_info_from_fw = true;
+ goto extract_ecc_info;
+ }
+
switch (event_type) {
case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
@@ -7430,6 +7554,7 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
return;
}
+extract_ecc_info:
if (extract_info_from_fw) {
ecc_address = le64_to_cpu(ecc_data->ecc_address);
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
@@ -7806,8 +7931,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
- u8 cause;
bool reset_required;
+ u8 cause;
+ int rc;
+
+ if (event_type >= GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
+ event_type, GAUDI_EVENT_SIZE - 1);
+ return;
+ }
gaudi->events_stat[event_type]++;
gaudi->events_stat_aggregate[event_type]++;
@@ -7880,10 +8012,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7902,10 +8034,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7993,6 +8125,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Error during system state dump %d\n", rc);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -8031,7 +8167,9 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
return;
reset_device:
- if (hdev->hard_reset_on_fw_events)
+ if (hdev->asic_prop.fw_security_enabled)
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);
+ else if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, HL_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
@@ -8563,11 +8701,20 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
+ int rc;
+
if (ctx->asid == HL_KERNEL_ASID_ID)
return 0;
- gaudi_mmu_prepare(ctx->hdev, ctx->asid);
- return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ if (rc)
+ return rc;
+
+ rc = gaudi_restore_user_registers(ctx->hdev);
+ if (rc)
+ gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
+
+ return rc;
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
@@ -8596,6 +8743,11 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
sizeof(struct packet_msg_prot) * 2;
}
+static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
+}
+
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
@@ -8902,16 +9054,12 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
- int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
- CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob->sob_id * 4, 1, 0);
- if (rc)
- dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
@@ -8977,6 +9125,280 @@ static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int gaudi_add_sync_to_engine_map_entry(
+ struct hl_sync_to_engine_map *map, u32 reg_value,
+ enum hl_sync_engine_type engine_type, u32 engine_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ /* The register value holds a partial address of a sync object and
+ * serves as a unique identifier, so the CFG base offset must be
+ * subtracted from the value first.
+ */
+ if (reg_value == 0 || reg_value == 0xffffffff)
+ return 0;
+ reg_value -= (u32)CFG_BASE;
+
+ /* create a new hash entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->engine_type = engine_type;
+ entry->engine_id = engine_id;
+ entry->sync_id = reg_value;
+ hash_add(map->tb, &entry->node, reg_value);
+
+ return 0;
+}
+
+static int gaudi_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, j, rc;
+ u32 reg_value;
+
+ /* Iterate over TPC engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) {
+ /* TPC registers must be accessed with clock gating disabled */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] +
+ sds->props[SP_NEXT_TPC] * i);
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_TPC, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ /* Iterate over MME engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) {
+ for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) {
+ /* MME registers must be accessed with clock gating
+ * disabled
+ */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_MME_CFG_SO] +
+ sds->props[SP_NEXT_MME] * i +
+ j * sizeof(u32));
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(
+ map, reg_value, ENGINE_MME,
+ i * sds->props[SP_SUB_MME_ENG_NUM] + j);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+ }
+
+ /* Iterate over DMA engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) {
+ reg_value = RREG32(sds->props[SP_DMA_CFG_SO] +
+ sds->props[SP_DMA_QUEUES_OFFSET] * i);
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_DMA, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ return 0;
+
+free_sync_to_engine_map:
+ hl_state_dump_free_sync_to_engine_map(map);
+
+ return rc;
+}
+
+static int gaudi_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ return FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK,
+ mon->status);
+}
+
+static void gaudi_fill_sobs_from_mon(char *sobs, struct hl_mon_state_dump *mon)
+{
+ const size_t max_write = 10;
+ u32 gid, mask, sob;
+ int i, offset;
+
+ /* The sync object ID is derived as follows:
+ * (group_id * 8 + index of each cleared bit in the mask)
+ */
+ gid = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data);
+ mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data);
+
+ for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE -
+ max_write; mask >>= 1, i++) {
+ if (!(mask & 1)) {
+ sob = gid * MONITOR_MAX_SOBS + i;
+
+ if (offset > 0)
+ offset += snprintf(sobs + offset, max_write,
+ ", ");
+
+ offset += snprintf(sobs + offset, max_write, "%u", sob);
+ }
+ }
+}
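
As the comment in gaudi_fill_sobs_from_mon says, each cleared bit i in the 8-bit monitor arm mask selects sync object group_id * MONITOR_MAX_SOBS + i (MONITOR_MAX_SOBS is 8). A standalone sketch of the same decode; the group id and mask values are made up for illustration:

#include <stdio.h>

#define MONITOR_MAX_SOBS 8

int main(void)
{
	unsigned int gid = 5;     /* example MON_ARM SID field */
	unsigned int mask = 0xf0; /* example: bits 0-3 cleared -> SOBs 40-43 */
	unsigned int i;

	for (i = 0; i < MONITOR_MAX_SOBS; i++, mask >>= 1)
		if (!(mask & 1))
			printf("monitored SOB %u\n", gid * MONITOR_MAX_SOBS + i);
	return 0;
}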
+
+static int gaudi_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ const char *name;
+ char scratch_buf1[BIN_REG_STRING_SIZE],
+ scratch_buf2[BIN_REG_STRING_SIZE];
+ char monitored_sobs[MONITOR_SOB_STRING_SIZE] = {0};
+
+ name = hl_state_dump_get_monitor_name(hdev, mon);
+ if (!name)
+ name = "";
+
+ gaudi_fill_sobs_from_mon(monitored_sobs, mon);
+
+ return hl_snprintf_resize(
+ buf, size, offset,
+ "Mon id: %u%s, wait for group id: %u mask %s to reach val: %u and write %u to address 0x%llx. Pending: %s. Means sync objects [%s] are being monitored.",
+ mon->id, name,
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data),
+ hl_format_as_binary(
+ scratch_buf1, sizeof(scratch_buf1),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data)),
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK,
+ mon->arm_data),
+ mon->wr_data,
+ (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low,
+ hl_format_as_binary(
+ scratch_buf2, sizeof(scratch_buf2),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK,
+ mon->status)),
+ monitored_sobs);
+}
+
+
+static int gaudi_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = -ENOMEM, i;
+ u32 *statuses, *fences;
+
+ statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*statuses), GFP_KERNEL);
+ if (!statuses)
+ goto out;
+
+ fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto free_status;
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i)
+ statuses[i] = RREG32(status_base_offset + i * sizeof(u32));
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
+ fences[i] = RREG32(base_offset + i * sizeof(u32));
+
+ /* The actual print */
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) {
+ u32 fence_id;
+ u64 fence_cnt, fence_rdata;
+ const char *engine_name;
+
+ if (!FIELD_GET(TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK,
+ statuses[i]))
+ continue;
+
+ fence_id =
+ FIELD_GET(TPC0_QM_CP_STS_0_FENCE_ID_MASK, statuses[i]);
+ fence_cnt = base_offset + CFG_BASE +
+ sizeof(u32) *
+ (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]);
+ fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] +
+ sds->props[SP_FENCE0_RDATA_OFFSET];
+ engine_name = hl_sync_engine_to_string(engine_type);
+
+ rc = hl_snprintf_resize(
+ buf, size, offset,
+ "%s%u, stream %u: fence id %u cnt = 0x%llx (%s%u_QM.CP_FENCE%u_CNT_%u) rdata = 0x%llx (%s%u_QM.CP_FENCE%u_RDATA_%u) value = %u, cp_status = %u\n",
+ engine_name, engine_id,
+ i, fence_id,
+ fence_cnt, engine_name, engine_id, fence_id, i,
+ fence_rdata, engine_name, engine_id, fence_id, i,
+ fences[fence_id],
+ statuses[i]);
+ if (rc)
+ goto free_fences;
+ }
+
+ rc = 0;
+
+free_fences:
+ kfree(fences);
+free_status:
+ kfree(statuses);
+out:
+ return rc;
+}
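
The fence_cnt arithmetic above locates CP_FENCE<fence_id>_CNT_<i> for queue i within an engine's QM block. A standalone sketch of the address calculation; CFG_BASE and the engine offset are illustrative stand-ins, and NUM_OF_QUEUES mirrors SP_ENGINE_NUM_OF_QUEUES on Gaudi:

#include <stdio.h>

#define NUM_OF_QUEUES 5 /* stands in for sds->props[SP_ENGINE_NUM_OF_QUEUES] */

int main(void)
{
	unsigned long long cfg_base = 0x1000000ull; /* illustrative CFG_BASE */
	unsigned long long base_offset = 0x2000;    /* illustrative engine QM offset */
	unsigned int i = 2, fence_id = 1;

	unsigned long long fence_cnt = base_offset + cfg_base +
			sizeof(unsigned int) * (i + fence_id * NUM_OF_QUEUES);

	/* 0x2000 + cfg_base + 4 * (2 + 1 * 5) = cfg_base + 0x201c */
	printf("CP_FENCE%u_CNT_%u at 0x%llx\n", fence_id, i, fence_cnt);
	return 0;
}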
+
+
+static struct hl_state_dump_specs_funcs gaudi_state_dump_funcs = {
+ .monitor_valid = gaudi_monitor_valid,
+ .print_single_monitor = gaudi_print_single_monitor,
+ .gen_sync_to_engine_map = gaudi_gen_sync_to_engine_map,
+ .print_fences_single_engine = gaudi_print_fences_single_engine,
+};
+
+static void gaudi_state_dump_init(struct hl_device *hdev)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_so_id_to_str); ++i)
+ hash_add(sds->so_id_to_str_tb,
+ &gaudi_so_id_to_str[i].node,
+ gaudi_so_id_to_str[i].id);
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_monitor_id_to_str); ++i)
+ hash_add(sds->monitor_id_to_str_tb,
+ &gaudi_monitor_id_to_str[i].node,
+ gaudi_monitor_id_to_str[i].id);
+
+ sds->props = gaudi_state_dump_specs_props;
+
+ sds->sync_namager_names = gaudi_sync_manager_names;
+
+ sds->funcs = gaudi_state_dump_funcs;
+}
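
gaudi_state_dump_init only populates the id-to-string hash tables; readers use the standard <linux/hashtable.h> accessors. A hedged kernel-context sketch of the lookup side, assuming an entry type with id, name and node members like the gaudi_so_id_to_str entries hashed above:

/* kernel-context sketch, not standalone */
static const char *lookup_so_name(struct hl_state_dump_specs *sds, u64 sob_id)
{
	struct hl_hw_obj_name_entry *entry; /* assumed entry type */

	hash_for_each_possible(sds->so_id_to_str_tb, entry, node, sob_id)
		if (entry->id == sob_id)
			return entry->name;

	return NULL;
}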
+
+static u32 *gaudi_get_stream_master_qid_arr(void)
+{
+ return gaudi_stream_master;
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8989,7 +9411,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_engines = gaudi_halt_engines,
.suspend = gaudi_suspend,
.resume = gaudi_resume,
- .cb_mmap = gaudi_cb_mmap,
+ .mmap = gaudi_mmap,
.ring_doorbell = gaudi_ring_doorbell,
.pqe_write = gaudi_pqe_write,
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
@@ -9062,7 +9484,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
.enable_events_from_fw = gaudi_enable_events_from_fw,
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
.init_firmware_loader = gaudi_init_firmware_loader,
- .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm
+ .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
+ .state_dump_init = gaudi_state_dump_init,
+ .get_sob_addr = gaudi_get_sob_addr,
+ .set_pci_memory_regions = gaudi_set_pci_memory_regions,
+ .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 957bf3720f70..bbbf1c343e75 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -36,6 +36,8 @@
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
NUMBER_OF_CPU_HW_QUEUES)
+#define GAUDI_STREAM_MASTER_ARR_SIZE 8
+
#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
#endif
@@ -50,6 +52,8 @@
#define DC_POWER_DEFAULT_PCI 60000 /* 60W */
#define DC_POWER_DEFAULT_PMC 60000 /* 60W */
+#define DC_POWER_DEFAULT_PMC_SEC 97000 /* 97W */
+
#define GAUDI_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define TPC_ENABLED_MASK 0xFF
@@ -62,7 +66,7 @@
#define DMA_MAX_TRANSFER_SIZE U32_MAX
-#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_DEFAULT_CARD_NAME "HL205"
#define GAUDI_MAX_PENDING_CS SZ_16K
@@ -117,6 +121,7 @@
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
+#define MONITOR_MAX_SOBS 8
/* DRAM Memory Map */
@@ -200,6 +205,18 @@
#define HW_CAP_TPC_MASK GENMASK(31, 24)
#define HW_CAP_TPC_SHIFT 24
+#define NEXT_SYNC_OBJ_ADDR_INTERVAL \
+ (mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0)
+#define NUM_OF_MME_ENGINES 2
+#define NUM_OF_MME_SUB_ENGINES 2
+#define NUM_OF_TPC_ENGINES 8
+#define NUM_OF_DMA_ENGINES 8
+#define NUM_OF_QUEUES 5
+#define NUM_OF_STREAMS 4
+#define NUM_OF_FENCES 4
+
+
#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
#define GAUDI_PCI_TO_CPU_ADDR(addr) \
do { \
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index c2a27ed1c4d1..5349c1be13f9 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -622,11 +622,6 @@ static int gaudi_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER,
- hdev->compute_ctx->asid);
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER,
- hdev->compute_ctx->asid);
-
msb = upper_32_bits(input->buffer_address) >> 8;
msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 0d3240f1f7d7..cb265c00cf73 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -9559,6 +9559,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10013,6 +10014,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10466,6 +10468,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10919,6 +10922,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11372,6 +11376,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11825,6 +11830,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12280,6 +12286,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12735,6 +12742,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 755e08cf2ecc..031c1849da14 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -350,6 +350,8 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
+static s64 goya_state_dump_specs_props[SP_MAX] = {0};
+
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
@@ -387,6 +389,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -466,6 +469,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -649,14 +654,14 @@ pci_init:
GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
if (!hdev->pldm) {
@@ -955,8 +960,9 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
hdev->allow_external_soft_reset = true;
+ hdev->supports_wait_for_multi_cs = false;
- goya_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
return 0;
@@ -2374,7 +2380,7 @@ static void goya_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -2493,6 +2499,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
+ fw_loader->linux_loaded = false;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
@@ -2696,14 +2703,7 @@ disable_queues:
return rc;
}
-/*
- * goya_hw_fini - Goya hardware tear-down code
- *
- * @hdev: pointer to hl_device structure
- * @hard_reset: should we do hard reset to all engines or just reset the
- * compute/dma engines
- */
-static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
@@ -2796,7 +2796,7 @@ int goya_resume(struct hl_device *hdev)
return goya_init_iatu(hdev);
}
-static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4797,6 +4797,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
>> EQ_CTL_EVENT_TYPE_SHIFT);
struct goya_device *goya = hdev->asic_specific;
+ if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
+ event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
+ return;
+ }
+
goya->events_stat[event_type]++;
goya->events_stat_aggregate[event_type]++;
@@ -5475,14 +5481,14 @@ u64 goya_get_device_time(struct hl_device *hdev)
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
-static void goya_collective_wait_init_cs(struct hl_cs *cs)
+static int goya_collective_wait_init_cs(struct hl_cs *cs)
{
-
+ return 0;
}
static int goya_collective_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ u32 collective_engine_id, u32 encaps_signal_offset)
{
return -EINVAL;
}
@@ -5524,6 +5530,62 @@ static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static int goya_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
+ .monitor_valid = goya_monitor_valid,
+ .print_single_monitor = goya_print_single_monitor,
+ .gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
+ .print_fences_single_engine = goya_print_fences_single_engine,
+};
+
+static void goya_state_dump_init(struct hl_device *hdev)
+{
+ /* Not implemented */
+ hdev->state_dump_specs.props = goya_state_dump_specs_props;
+ hdev->state_dump_specs.funcs = goya_state_dump_funcs;
+}
+
+static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return 0;
+}
+
+static u32 *goya_get_stream_master_qid_arr(void)
+{
+ return NULL;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5536,7 +5598,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_engines = goya_halt_engines,
.suspend = goya_suspend,
.resume = goya_resume,
- .cb_mmap = goya_cb_mmap,
+ .mmap = goya_mmap,
.ring_doorbell = goya_ring_doorbell,
.pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
@@ -5609,7 +5671,11 @@ static const struct hl_asic_funcs goya_funcs = {
.enable_events_from_fw = goya_enable_events_from_fw,
.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
.init_firmware_loader = goya_init_firmware_loader,
- .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram
+ .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
+ .state_dump_init = goya_state_dump_init,
+ .get_sob_addr = &goya_get_sob_addr,
+ .set_pci_memory_regions = goya_set_pci_memory_regions,
+ .get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 80b1d5a9d9f1..9ff6a448f0d4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -98,6 +98,18 @@ struct hl_eq_fw_alive {
__u8 pad[7];
};
+enum hl_pcie_addr_dec_cause {
+ PCIE_ADDR_DEC_HBW_ERR_RESP,
+ PCIE_ADDR_DEC_LBW_ERR_RESP,
+ PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR
+};
+
+struct hl_eq_pcie_addr_dec_data {
+ /* enum hl_pcie_addr_dec_cause */
+ __u8 addr_dec_cause;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
@@ -106,6 +118,7 @@ struct hl_eq_entry {
struct hl_eq_sm_sei_data sm_sei_data;
struct cpucp_pkt_sync_err pkt_sync_err;
struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_pcie_addr_dec_data pcie_addr_dec_data;
__le64 data[7];
};
};
@@ -116,7 +129,7 @@ struct hl_eq_entry {
#define EQ_CTL_READY_MASK 0x80000000
#define EQ_CTL_EVENT_TYPE_SHIFT 16
-#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
#define EQ_CTL_INDEX_SHIFT 0
#define EQ_CTL_INDEX_MASK 0x0000FFFF
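
This hunk widens the event-type field from 10 to 12 bits; the extraction in gaudi_handle_eqe stays the same shift-and-mask. A standalone illustration with an arbitrary example control word:

#include <stdio.h>

#define EQ_CTL_EVENT_TYPE_SHIFT 16
#define EQ_CTL_EVENT_TYPE_MASK  0x0FFF0000 /* was 0x03FF0000 */
#define EQ_CTL_INDEX_MASK       0x0000FFFF

int main(void)
{
	unsigned int ctl = 0x8A7B0005; /* example: ready, event 0xA7B, index 5 */

	printf("event_type=0x%x index=%u\n",
	       (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT,
	       ctl & EQ_CTL_INDEX_MASK);
	return 0;
}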
@@ -300,7 +313,7 @@ enum pq_init_status {
* The packet's arguments specify the desired sensor and the field to
* set.
*
- * CPUCP_PACKET_PCIE_THROUGHPUT_GET
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
* Get throughput of PCIe.
* The packet's arguments specify the transaction direction (TX/RX).
* The measurement window is 10 msec and the return value is in KB/sec.
@@ -309,19 +322,19 @@ enum pq_init_status {
* Replay count measures the number of "replay" events, which is basically
* the number of retries done by PCIe.
*
- * CPUCP_PACKET_TOTAL_ENERGY_GET
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
* Total Energy is a measurement of the energy consumed since FW Linux
* was loaded. It is calculated by multiplying the average power
* by the time elapsed since armcp start. The units are millijoules.
*
- * CPUCP_PACKET_PLL_INFO_GET
+ * CPUCP_PACKET_PLL_INFO_GET -
* Fetch frequencies of PLL from the required PLL IP.
* The packet's arguments specify the device PLL type
* Pll type is the PLL from device pll_index enum.
* The result is composed of 4 outputs, each is 16-bit
* frequency in MHz.
*
- * CPUCP_PACKET_POWER_GET
+ * CPUCP_PACKET_POWER_GET -
* Fetch the present power consumption of the device (Current * Voltage).
*
* CPUCP_PACKET_NIC_PFC_SET -
@@ -345,6 +358,24 @@ enum pq_init_status {
* CPUCP_PACKET_MSI_INFO_SET -
* set the index number for each supported msi type going from
* host to device
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
*/
enum cpucp_packet_id {
@@ -385,6 +416,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_NIC_LPBK_SET, /* internal */
CPUCP_PACKET_NIC_MAC_CFG, /* internal */
CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -414,6 +450,11 @@ enum cpucp_packet_id {
#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
/* heartbeat status bits */
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
@@ -467,7 +508,8 @@ struct cpucp_packet {
__le32 status_mask;
};
- __le32 reserved;
+ /* For NIC requests */
+ __le32 port_index;
};
struct cpucp_unmask_irq_arr_packet {
@@ -476,6 +518,12 @@ struct cpucp_unmask_irq_arr_packet {
__le32 irqs[0];
};
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[0];
+};
+
struct cpucp_array_data_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
@@ -595,6 +643,18 @@ enum pll_index {
PLL_MAX
};
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
/* Event Queue Packets */
struct eq_generic_event {
@@ -700,6 +760,15 @@ struct cpucp_mac_addr {
__u8 mac_addr[ETH_ALEN];
};
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ UNKNOWN_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
+};
+
struct cpucp_nic_info {
struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
__le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
@@ -708,6 +777,40 @@ struct cpucp_nic_info {
__le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __u8 reserved[6];
+};
+
+/**
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g NAK.
+ * @high_ber_reinit: link reinit count due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high BER events.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
};
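
All multi-byte fields of cpucp_nic_status arrive little-endian, so a host-side reader converts with le32_to_cpu. A hedged kernel-context sketch; the helper name is made up:

/* kernel-context sketch, not standalone */
static void report_nic_status(struct hl_device *hdev, struct cpucp_nic_status *s)
{
	dev_info(hdev->dev, "port %u: correctable %u, uncorrectable %u, up %u\n",
		 le32_to_cpu(s->port),
		 le32_to_cpu(s->correctable_err_cnt),
		 le32_to_cpu(s->uncorrectable_err_cnt),
		 s->up);
}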
#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index fa8a5ad2d438..3099653234e4 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -78,6 +78,26 @@
* CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
* should be contacted.
*
+ * CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD HALT ACK from ARC0 is not received
+ * within specified retries after issuing
+ * HALT request. ARC0 appears to be in bad
+ * reset.
+ *
+ * CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD HALT ACK from ARC1 is not received
+ * within specified retries after issuing
+ * HALT request. ARC1 appears to be in bad
+ * reset.
+ *
+ * CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD RUN ACK from ARC0 is not received
+ * within specified timeout after issuing
+ * RUN request. ARC0 appears to be in bad
+ * reset.
+ *
+ * CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD RUN ACK from ARC1 is not received
+ * within specified timeout after issuing
+ * RUN request. ARC1 appears to be in bad
+ * reset.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -98,6 +118,10 @@
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
+#define CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD (1 << 14)
+#define CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD (1 << 15)
+#define CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD (1 << 16)
+#define CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD (1 << 17)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
#define CPU_BOOT_ERR1_ENABLED (1 << 31)
@@ -186,6 +210,10 @@
* configured and is ready for use.
* Initialized in: ppboot
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
* FW sends to host a bitmap of supported
* PLLs.
@@ -209,6 +237,21 @@
* prevent IRQs overriding each other.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -236,10 +279,14 @@
#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << 18)
#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << 23)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << 24)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << 25)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
@@ -313,10 +360,7 @@ struct cpu_dyn_regs {
__le32 hw_state;
__le32 kmd_msg_to_cpu;
__le32 cpu_cmd_status_to_host;
- union {
- __le32 gic_host_irq_ctrl;
- __le32 gic_host_pi_upd_irq;
- };
+ __le32 gic_host_pi_upd_irq;
__le32 gic_tpc_qm_irq_ctrl;
__le32 gic_mme_qm_irq_ctrl;
__le32 gic_dma_qm_irq_ctrl;
@@ -324,7 +368,9 @@ struct cpu_dyn_regs {
__le32 gic_dma_core_irq_ctrl;
__le32 gic_host_halt_irq;
__le32 gic_host_ints_irq;
- __le32 reserved1[24]; /* reserve for future use */
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 reserved1[22]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -462,6 +508,11 @@ struct lkd_fw_comms_msg {
* Do not wait for BMC response.
*
* COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC, except that the
+ * memory space is allocated in an ELBI-access-only
+ * address range.
+ *
*/
enum comms_cmd {
COMMS_NOOP = 0,
@@ -474,6 +525,7 @@ enum comms_cmd {
COMMS_GOTO_WFE = 7,
COMMS_SKIP_BMC = 8,
COMMS_LOW_PLL_OPP = 9,
+ COMMS_PREP_DESC_ELBI = 10,
COMMS_INVLD_LAST
};
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 5bb54b34a8ae..ffdfbd9b3220 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -126,6 +126,9 @@
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x4F4800
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x4F5000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0 0x4F5800
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 9aea7e996654..acc85d3ed98b 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -449,4 +449,21 @@ enum axi_id {
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK 0x1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_SHIFT 1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK 0x1FE
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK 0xFF
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_SHIFT 8
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK 0xFF00
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_SHIFT 16
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_MASK 0x10000
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_SHIFT 17
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK 0xFFFE0000
+#define TPC0_QM_CP_STS_0_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_0_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK 0x400000
+
#endif /* GAUDI_MASKS_H_ */
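
These ARM/STATUS masks are what gaudi_print_single_monitor feeds to FIELD_GET. A standalone equivalent of that decode; the arm_data value is made up, and the field_get macro below is a stand-in that only works for contiguous masks:

#include <stdio.h>

#define MON_ARM_SID_MASK  0xFF
#define MON_ARM_MASK_MASK 0xFF00
#define MON_ARM_SOD_MASK  0xFFFE0000

/* minimal FIELD_GET stand-in; (mask & ~(mask << 1)) is the mask's lowest bit */
#define field_get(mask, val) (((val) & (mask)) / ((mask) & ~((mask) << 1u)))

int main(void)
{
	unsigned int arm_data = 0x00A2F105; /* example MON_ARM register value */

	printf("sid=%u mask=0x%x sod=%u\n",
	       field_get(MON_ARM_SID_MASK, arm_data),
	       field_get(MON_ARM_MASK_MASK, arm_data),
	       field_get(MON_ARM_SOD_MASK, arm_data));
	return 0;
}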
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index d95d4162ae2c..b9bd5a7f71eb 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,8 +12,6 @@
* PSOC scratch-pad registers
*/
#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-/* TODO: remove mmGIC_HOST_IRQ_CTRL_POLL_REG */
-#define mmGIC_HOST_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_HOST_PI_UPD_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_TPC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
#define mmGIC_MME_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 7ac788fae1b8..c394c0b08519 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -271,7 +271,6 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
- bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 95b1c6800a22..fe6fd34b8caf 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/utsname.h>
#define DEFAULT_COUNT 10
@@ -210,6 +211,8 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
+/* For test debug reporting. */
+char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
@@ -490,6 +493,11 @@ static int __init lkdtm_module_init(void)
crash_count = cpoint_count;
#endif
+ /* Common initialization. */
+ lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)",
+ init_uts_ns.name.release,
+ init_uts_ns.name.machine);
+
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
@@ -538,6 +546,8 @@ static void __exit lkdtm_module_exit(void)
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
+ kfree(lkdtm_kernel_info);
+
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index d7d64d9765eb..c212a253edde 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -5,17 +5,17 @@
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
-#define LKDTM_KERNEL "kernel (" UTS_RELEASE " " UTS_MACHINE ")"
+extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
{ \
if (IS_ENABLED(kconfig)) \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
else \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y\n"); \
+ pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
}
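
With the compile-time UTS strings replaced by the runtime lkdtm_kernel_info, callers of the macro are unchanged. A kernel-context usage sketch; the kconfig symbol is only an example:

/* kernel-context sketch: reports via lkdtm_kernel_info at run time */
static void example_check(void)
{
	pr_expected_config(CONFIG_HARDENED_USERCOPY);
}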
#ifndef MODULE
@@ -25,24 +25,30 @@ int lkdtm_check_bool_cmdline(const char *param);
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built with " #kconfig "=y but booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, since this %s was built with " #kconfig "=y but booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y and booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y and booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
} \
} else { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, as this " LKDTM_KERNEL " was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, as this %s was built *without* " #kconfig "=y and booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built *without* " #kconfig "=y but booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("This is probably expected, since this %s was built *without* " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
break; \
} \
} \
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index d1137a95ad02..2ed7e3aaff3a 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,8 @@
#define FLAG_USE_DMA BIT(0)
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_TI_J7200 0xb00f
+#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
@@ -970,6 +972,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
@@ -979,6 +987,7 @@ static struct pci_driver pci_endpoint_test_driver = {
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index cb1a64a5c256..80a2c270d502 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -578,10 +578,6 @@ static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
}
}
data->bytes_xfered += miter->length;
-
- /* This can go away once MIPS implements
- * flush_kernel_dcache_page */
- flush_dcache_page(miter->page);
}
sg_miter_stop(miter);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a1bcde3395a6..f4c8e1a61f53 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -941,7 +941,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
/* discard mappings */
if (direction == DMA_FROM_DEVICE)
- flush_kernel_dcache_page(sg_page(sg));
+ flush_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
if (dma_dev)
dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8bab6f8718a9..796a2eccbef0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -45,10 +45,9 @@ config MTD_BLOCK
on RAM chips in this manner. This block device is a user of MTD
devices performing that function.
- At the moment, it is also required for the Journalling Flash File
- System(s) to obtain a handle on the MTD device when it's mounted
- (although JFFS and JFFS2 don't actually use any of the functionality
- of the mtdblock device).
+ Note that mounting a JFFS2 filesystem doesn't require using mtdblock.
+ A rootfs can be mounted directly from the MTD device via the "root="
+ boot argument, e.g. "root=mtd2" or "root=mtd:name_of_device".
Later, it may be extended to perform read/erase/modify/write cycles
on flash chips to emulate a smaller block size. Needless to say,
@@ -70,6 +69,9 @@ config MTD_BLOCK_RO
You do not need this option for use with the DiskOnChip devices. For
those, enable NFTL support (CONFIG_NFTL) instead.
+comment "Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK."
+ depends on MTD_BLOCK || MTD_BLOCK_RO
+
config FTL
tristate "FTL (Flash Translation Layer) support"
depends on BLOCK
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9b33c082179d..f655d2905270 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1029,7 +1029,7 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
partition->mbd.tr = tr;
partition->mbd.devnum = -1;
- if (!add_mtd_blktrans_dev((void *)partition))
+ if (!add_mtd_blktrans_dev(&partition->mbd))
return;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 6650acbc961e..aaa164b977fe 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -127,29 +127,6 @@ config MTD_PHYSMAP_GPIO_ADDR
Extend the physmap driver to allow flashes to be partially
physically addressed and assisted by GPIOs.
-config MTD_PMC_MSP_EVM
- tristate "CFI Flash device mapped on PMC-Sierra MSP"
- depends on PMC_MSP && MTD_CFI
- help
- This provides a 'mapping' driver which supports the way
- in which user-programmable flash chips are connected on the
- PMC-Sierra MSP eval/demo boards.
-
-choice
- prompt "Maximum mappable memory available for flash IO"
- depends on MTD_PMC_MSP_EVM
- default MSP_FLASH_MAP_LIMIT_32M
-
-config MSP_FLASH_MAP_LIMIT_32M
- bool "32M"
-
-endchoice
-
-config MSP_FLASH_MAP_LIMIT
- hex
- default "0x02000000"
- depends on MSP_FLASH_MAP_LIMIT_32M
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 79f018cf412f..11fea9c8d561 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@ physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
-obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
deleted file mode 100644
index 2051f28ddac6..000000000000
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
- * Config with both CFI and JEDEC device support.
- *
- * Basically physmap.c with the addition of partitions and
- * an array of mapping info to accommodate more than one flash type per board.
- *
- * Copyright 2005-2007 PMC-Sierra, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-#include <msp_regs.h>
-
-
-static struct mtd_info **msp_flash;
-static struct mtd_partition **msp_parts;
-static struct map_info *msp_maps;
-static int fcnt;
-
-#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
-
-static int __init init_msp_flash(void)
-{
- int i, j, ret = -ENOMEM;
- int offset, coff;
- char *env;
- int pcnt;
- char flash_name[] = "flash0";
- char part_name[] = "flash0_0";
- unsigned addr, size;
-
- /* If ELB is disabled by "ful-mux" mode, we can't get at flash */
- if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
- (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
- printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
- return -ENXIO;
- }
-
- /* examine the prom environment for flash devices */
- for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
- flash_name[5] = '0' + fcnt + 1;
-
- if (fcnt < 1)
- return -ENXIO;
-
- printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
-
- msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
- if (!msp_flash)
- return -ENOMEM;
-
- msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
- if (!msp_parts)
- goto free_msp_flash;
-
- msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
- if (!msp_maps)
- goto free_msp_parts;
-
- /* loop over the flash devices, initializing each */
- for (i = 0; i < fcnt; i++) {
- /* examine the prom environment for flash partititions */
- part_name[5] = '0' + i;
- part_name[7] = '0';
- for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
- part_name[7] = '0' + pcnt + 1;
-
- if (pcnt == 0) {
- printk(KERN_NOTICE "Skipping flash device %d "
- "(no partitions defined)\n", i);
- continue;
- }
-
- msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
- GFP_KERNEL);
- if (!msp_parts[i])
- goto cleanup_loop;
-
- /* now initialize the devices proper */
- flash_name[5] = '0' + i;
- env = prom_getenv(flash_name);
-
- if (sscanf(env, "%x:%x", &addr, &size) < 2) {
- ret = -ENXIO;
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
- addr = CPHYSADDR(addr);
-
- printk(KERN_NOTICE
- "MSP flash device \"%s\": 0x%08x at 0x%08x\n",
- flash_name, size, addr);
- /* This must matchs the actual size of the flash chip */
- msp_maps[i].size = size;
- msp_maps[i].phys = addr;
-
- /*
- * Platforms have a specific limit of the size of memory
- * which may be mapped for flash:
- */
- if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
- size = CONFIG_MSP_FLASH_MAP_LIMIT;
-
- msp_maps[i].virt = ioremap(addr, size);
- if (msp_maps[i].virt == NULL) {
- ret = -ENXIO;
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
-
- msp_maps[i].bankwidth = 1;
- msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
- if (!msp_maps[i].name) {
- iounmap(msp_maps[i].virt);
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
-
- for (j = 0; j < pcnt; j++) {
- part_name[5] = '0' + i;
- part_name[7] = '0' + j;
-
- env = prom_getenv(part_name);
-
- if (sscanf(env, "%x:%x:%n", &offset, &size,
- &coff) < 2) {
- ret = -ENXIO;
- kfree(msp_maps[i].name);
- iounmap(msp_maps[i].virt);
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
-
- msp_parts[i][j].size = size;
- msp_parts[i][j].offset = offset;
- msp_parts[i][j].name = env + coff;
- }
-
- /* now probe and add the device */
- simple_map_init(&msp_maps[i]);
- msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
- if (msp_flash[i]) {
- msp_flash[i]->owner = THIS_MODULE;
- mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
- } else {
- printk(KERN_ERR "map probe failed for flash\n");
- ret = -ENXIO;
- kfree(msp_maps[i].name);
- iounmap(msp_maps[i].virt);
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
- }
-
- return 0;
-
-cleanup_loop:
- while (i--) {
- mtd_device_unregister(msp_flash[i]);
- map_destroy(msp_flash[i]);
- kfree(msp_maps[i].name);
- iounmap(msp_maps[i].virt);
- kfree(msp_parts[i]);
- }
- kfree(msp_maps);
-free_msp_parts:
- kfree(msp_parts);
-free_msp_flash:
- kfree(msp_flash);
- return ret;
-}
-
-static void __exit cleanup_msp_flash(void)
-{
- int i;
-
- for (i = 0; i < fcnt; i++) {
- mtd_device_unregister(msp_flash[i]);
- map_destroy(msp_flash[i]);
- iounmap((void *)msp_maps[i].virt);
-
- /* free the memory */
- kfree(msp_maps[i].name);
- kfree(msp_parts[i]);
- }
-
- kfree(msp_flash);
- kfree(msp_parts);
- kfree(msp_maps);
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for PMC-Sierra MSP boards");
-MODULE_LICENSE("GPL");
-
-module_init(init_msp_flash);
-module_exit(cleanup_msp_flash);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 44bea3f65060..b8ae1ec14e17 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -23,7 +23,6 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
-static DEFINE_MUTEX(blktrans_ref_mutex);
static void blktrans_dev_release(struct kref *kref)
{
@@ -37,26 +36,9 @@ static void blktrans_dev_release(struct kref *kref)
kfree(dev);
}
-static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
-{
- struct mtd_blktrans_dev *dev;
-
- mutex_lock(&blktrans_ref_mutex);
- dev = disk->private_data;
-
- if (!dev)
- goto unlock;
- kref_get(&dev->ref);
-unlock:
- mutex_unlock(&blktrans_ref_mutex);
- return dev;
-}
-
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
- mutex_lock(&blktrans_ref_mutex);
kref_put(&dev->ref, blktrans_dev_release);
- mutex_unlock(&blktrans_ref_mutex);
}
@@ -201,19 +183,16 @@ static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
int ret = 0;
- if (!dev)
- return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ kref_get(&dev->ref);
- mutex_lock(&mtd_table_mutex);
mutex_lock(&dev->lock);
if (dev->open)
goto unlock;
- kref_get(&dev->ref);
__module_get(dev->tr->owner);
if (!dev->mtd)
@@ -233,8 +212,6 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
unlock:
dev->open++;
mutex_unlock(&dev->lock);
- mutex_unlock(&mtd_table_mutex);
- blktrans_dev_put(dev);
return ret;
error_release:
@@ -242,27 +219,20 @@ error_release:
dev->tr->release(dev);
error_put:
module_put(dev->tr->owner);
- kref_put(&dev->ref, blktrans_dev_release);
mutex_unlock(&dev->lock);
- mutex_unlock(&mtd_table_mutex);
blktrans_dev_put(dev);
return ret;
}
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
-
- if (!dev)
- return;
+ struct mtd_blktrans_dev *dev = disk->private_data;
- mutex_lock(&mtd_table_mutex);
mutex_lock(&dev->lock);
if (--dev->open)
goto unlock;
- kref_put(&dev->ref, blktrans_dev_release);
module_put(dev->tr->owner);
if (dev->mtd) {
@@ -272,18 +242,14 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
}
unlock:
mutex_unlock(&dev->lock);
- mutex_unlock(&mtd_table_mutex);
blktrans_dev_put(dev);
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
int ret = -ENXIO;
- if (!dev)
- return ret;
-
mutex_lock(&dev->lock);
if (!dev->mtd)
@@ -292,7 +258,6 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
mutex_unlock(&dev->lock);
- blktrans_dev_put(dev);
return ret;
}
@@ -315,12 +280,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct gendisk *gd;
int ret;
- if (mutex_trylock(&mtd_table_mutex)) {
- mutex_unlock(&mtd_table_mutex);
- BUG();
- }
+ lockdep_assert_held(&mtd_table_mutex);
- mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@@ -332,7 +293,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
- mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@@ -350,14 +310,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
* minor numbers and that the disk naming code below can cope
* with this number. */
if (new->devnum > (MINORMASK >> tr->part_bits) ||
- (tr->part_bits && new->devnum >= 27 * 26)) {
- mutex_unlock(&blktrans_ref_mutex);
+ (tr->part_bits && new->devnum >= 27 * 26))
return ret;
- }
list_add_tail(&new->list, &tr->devs);
added:
- mutex_unlock(&blktrans_ref_mutex);
mutex_init(&new->lock);
kref_init(&new->ref);
@@ -449,10 +406,7 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
- if (mutex_trylock(&mtd_table_mutex)) {
- mutex_unlock(&mtd_table_mutex);
- BUG();
- }
+ lockdep_assert_held(&mtd_table_mutex);
if (old->disk_attributes)
sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
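For reference, the blktrans_open()/blktrans_release() rework above drops the old mutex_trylock()-then-BUG() idiom in favor of lockdep_assert_held(), which documents the locking contract and costs nothing on non-lockdep builds. A minimal sketch of the pattern (my_table_mutex and my_add_device are hypothetical names, not from the tree):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    static DEFINE_MUTEX(my_table_mutex);

    /* Caller must hold my_table_mutex; lockdep verifies this on debug builds. */
    static int my_add_device(void *dev)
    {
            lockdep_assert_held(&my_table_mutex);
            /* ... registration work that relies on the lock ... */
            return 0;
    }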
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index a80809543793..03e3de3a5d79 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -322,6 +322,10 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (!(mtd->flags & MTD_WRITEABLE))
dev->mbd.readonly = 1;
+ if (mtd_type_is_nand(mtd))
+ pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ tr->name, mtd->name);
+
if (add_mtd_blktrans_dev(&dev->mbd))
kfree(dev);
}
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index d92914f73d52..7c51952ce55d 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -46,6 +46,10 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
+ if (mtd_type_is_nand(mtd))
+ pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ tr->name, mtd->name);
+
if (add_mtd_blktrans_dev(dev))
kfree(dev);
}
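Both mtdblock variants emit this warning because they do no wear leveling or bad-block handling, which raw NAND needs; UBI block sits on top of UBI and provides both. The mtd_type_is_nand() check is a one-line helper from <linux/mtd/mtd.h>, roughly:

    static inline bool mtd_type_is_nand(const struct mtd_info *mtd)
    {
            return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
    }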
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 6e4d0017c0bd..f685a581df48 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
int i;
size_t size;
struct mtd_concat *concat;
+ struct mtd_info *subdev_master = NULL;
uint32_t max_erasesize, curr_erasesize;
int num_erase_region;
int max_writebufsize = 0;
@@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
- if (subdev[0]->_writev)
+
+ subdev_master = mtd_get_master(subdev[0]);
+ if (subdev_master->_writev)
concat->mtd._writev = concat_writev;
- if (subdev[0]->_read_oob)
+ if (subdev_master->_read_oob)
concat->mtd._read_oob = concat_read_oob;
- if (subdev[0]->_write_oob)
+ if (subdev_master->_write_oob)
concat->mtd._write_oob = concat_write_oob;
- if (subdev[0]->_block_isbad)
+ if (subdev_master->_block_isbad)
concat->mtd._block_isbad = concat_block_isbad;
- if (subdev[0]->_block_markbad)
+ if (subdev_master->_block_markbad)
concat->mtd._block_markbad = concat_block_markbad;
- if (subdev[0]->_panic_write)
+ if (subdev_master->_panic_write)
concat->mtd._panic_write = concat_panic_write;
+ if (subdev_master->_read)
+ concat->mtd._read = concat_read;
+ if (subdev_master->_write)
+ concat->mtd._write = concat_write;
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
@@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
subdev[i]->flags & MTD_WRITEABLE;
}
+ subdev_master = mtd_get_master(subdev[i]);
concat->mtd.size += subdev[i]->size;
concat->mtd.ecc_stats.badblocks +=
subdev[i]->ecc_stats.badblocks;
if (concat->mtd.writesize != subdev[i]->writesize ||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
concat->mtd.oobsize != subdev[i]->oobsize ||
- !concat->mtd._read_oob != !subdev[i]->_read_oob ||
- !concat->mtd._write_oob != !subdev[i]->_write_oob) {
+ !concat->mtd._read_oob != !subdev_master->_read_oob ||
+ !concat->mtd._write_oob != !subdev_master->_write_oob) {
+ /*
+ * Check against subdev[i] for data members, because
+ * subdev's attributes may be different from master
+ * mtd device. Check against subdev's master mtd
+ * device for callbacks, because the existence of
+ * subdev's callbacks is decided by master mtd device.
+ */
kfree(concat);
printk("Incompatible OOB or ECC data on \"%s\"\n",
subdev[i]->name);
@@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.name = name;
concat->mtd._erase = concat_erase;
- concat->mtd._read = concat_read;
- concat->mtd._write = concat_write;
concat->mtd._sync = concat_sync;
concat->mtd._lock = concat_lock;
concat->mtd._unlock = concat_unlock;
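The comment added in the hunk above is the crux of this fix: partition mtd_info instances carry per-partition data attributes (oobsize, writesize), but the _read_oob/_write_oob function pointers live only on the master device. A hedged sketch of the lookup pattern (can_do_oob is a hypothetical helper, not from the tree):

    #include <linux/mtd/mtd.h>

    /* Callbacks live on the master; sizes and offsets on the partition. */
    static bool can_do_oob(struct mtd_info *mtd)
    {
            struct mtd_info *master = mtd_get_master(mtd);

            return master->_read_oob && master->_write_oob && mtd->oobsize;
    }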
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 630728de4b7c..67b7cb67c030 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -480,9 +480,9 @@ config MTD_NAND_RICOH
select MTD_SM_COMMON
help
Enable support for Ricoh R5C852 xD card reader
- You also need to enable ether
+ You also need to enable either
NAND SSFDC (SmartMedia) read only translation layer' or new
- expermental, readwrite
+ experimental, readwrite
'SmartMedia/xD new translation layer'
config MTD_NAND_DISKONCHIP
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index d0e8ffd55c22..9dbf031716a6 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
"CAFE NAND", mtd);
if (err) {
dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
- goto out_ior;
+ goto out_free_rs;
}
/* Disable master reset, enable NAND clock */
@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
+ out_free_rs:
+ free_rs(cafe->rs);
out_ior:
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index 8b49fd56cf96..b9784f3da7a1 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#define EBU_CLC 0x000
@@ -102,7 +103,6 @@
#define MAX_CS 2
-#define HZ_PER_MHZ 1000000L
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
@@ -631,19 +631,26 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
ebu_host->dma_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(ebu_host->dma_tx))
- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
- "failed to request DMA tx chan!.\n");
+ if (IS_ERR(ebu_host->dma_tx)) {
+ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
+ "failed to request DMA tx chan!.\n");
+ goto err_disable_unprepare_clk;
+ }
ebu_host->dma_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(ebu_host->dma_rx))
- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
- "failed to request DMA rx chan!.\n");
+ if (IS_ERR(ebu_host->dma_rx)) {
+ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
+ "failed to request DMA rx chan!.\n");
+ ebu_host->dma_rx = NULL;
+ goto err_cleanup_dma;
+ }
resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto err_cleanup_dma;
+ }
ebu_host->cs[cs].addr_sel = res->start;
writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
ebu_host->ebu + EBU_ADDR_SEL(cs));
@@ -653,7 +660,8 @@ static int ebu_nand_probe(struct platform_device *pdev)
mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_cleanup_dma;
}
mtd->dev.parent = dev;
@@ -681,6 +689,7 @@ err_clean_nand:
nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
ebu_dma_cleanup(ebu_host);
+err_disable_unprepare_clk:
clk_disable_unprepare(ebu_host->clk);
return ret;
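The probe fix above converts early returns into a goto ladder so that everything acquired before the failure point (the clock, then the DMA channels) is released in reverse order. The shape of the ladder, as a minimal sketch with hypothetical names:

    static int my_probe(struct platform_device *pdev)
    {
            struct dma_chan *dma;
            struct clk *clk;
            int ret;

            clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;             /* nothing to unwind yet */

            dma = dma_request_chan(&pdev->dev, "tx");
            if (IS_ERR(dma)) {
                    ret = PTR_ERR(dma);
                    goto err_clk;           /* unwind in reverse order */
            }

            /* ... later acquisitions each get their own label ... */
            return 0;

    err_clk:
            clk_disable_unprepare(clk);
            return ret;
    }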
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 817bddccb775..ac3be92872d0 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -580,7 +580,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
u32 *addrs = nfc->cmdfifo.rw.addrs;
u32 cs = nfc->param.chip_select;
u32 cmd0, cmd_num, row_start;
- int ret = 0, i;
+ int i;
cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
@@ -620,7 +620,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
}
- return ret;
+ return 0;
}
static int meson_nfc_write_page_sub(struct nand_chip *nand,
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index dced32a126d9..b7ad030225f8 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -447,6 +447,35 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
return 0;
}
+/* Check if a potential BBT block is marked as bad */
+static int bbt_block_checkbad(struct nand_chip *this, struct nand_bbt_descr *td,
+ loff_t offs, uint8_t *buf)
+{
+ struct nand_bbt_descr *bd = this->badblock_pattern;
+
+ /*
+ * No need to check for a bad BBT block if the BBM area overlaps with
+ * the bad block table marker area in OOB since writing a BBM here
+ * invalidates the bad block table marker anyway.
+ */
+ if (!(td->options & NAND_BBT_NO_OOB) &&
+ td->offs >= bd->offs && td->offs < bd->offs + bd->len)
+ return 0;
+
+ /*
+ * There is no point in checking for a bad block marker if writing
+ * such marker is not supported
+ */
+ if (this->bbt_options & NAND_BBT_NO_OOB_BBM ||
+ this->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
+ if (scan_block_fast(this, bd, offs, buf) > 0)
+ return 1;
+
+ return 0;
+}
+
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
* @this: NAND chip object
@@ -560,6 +589,10 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
int actblock = startblock + dir * block;
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+ /* Check if block is marked bad */
+ if (bbt_block_checkbad(this, td, offs, buf))
+ continue;
+
/* Read first page */
scan_read(this, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index b1839eef5b65..b26d4947af02 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -911,7 +911,7 @@ static int omap_correct_data(struct nand_chip *chip, u_char *dat,
}
/**
- * omap_calcuate_ecc - Generate non-inverted ECC bytes.
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
* @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 446ba8d43fbc..2c8685f1f2fa 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -288,6 +288,8 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
/* Only enable or disable the engine */
return spinand_ecc_enable(spinand, enable);
}
@@ -307,7 +309,7 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
if (req->type == NAND_PAGE_WRITE)
return 0;
- /* Finish a page write: check the status, report errors/bitflips */
+ /* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
if (ret == -EBADMSG)
mtd->ecc_stats.failed++;
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index a9890350db02..3f31f1381a62 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -126,7 +126,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF4GE4AD",
@@ -136,7 +136,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF1G24AD",
@@ -146,16 +146,16 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
- NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
@@ -164,7 +164,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
@@ -173,7 +173,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0 /*SPINAND_HAS_QE_BIT*/,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
@@ -183,7 +183,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0 /*SPINAND_HAS_QE_BIT*/,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 6e0d5ce9b010..c546f8c5f24d 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -239,7 +239,7 @@ err:
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
- struct partition *part = (struct partition*)dev;
+ struct partition *part = container_of(dev, struct partition, mbd);
u_long addr;
size_t retlen;
int rc;
@@ -600,7 +600,7 @@ static int find_free_sector(const struct partition *part, const struct block *bl
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
- struct partition *part = (struct partition*)dev;
+ struct partition *part = container_of(dev, struct partition, mbd);
struct block *block;
u_long addr;
int i;
@@ -666,7 +666,7 @@ err:
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
- struct partition *part = (struct partition*)dev;
+ struct partition *part = container_of(dev, struct partition, mbd);
u_long old_addr;
int i;
int rc = 0;
@@ -705,9 +705,37 @@ err:
return rc;
}
+static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
+ unsigned long sector, unsigned int nr_sects)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ u_long addr;
+ int rc;
+
+ while (nr_sects) {
+ if (sector >= part->sector_count)
+ return -EIO;
+
+ addr = part->sector_map[sector];
+
+ if (addr != -1) {
+ rc = mark_sector_deleted(part, addr);
+ if (rc)
+ return rc;
+
+ part->sector_map[sector] = -1;
+ }
+
+ sector++;
+ nr_sects--;
+ }
+
+ return 0;
+}
+
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
- struct partition *part = (struct partition*)dev;
+ struct partition *part = container_of(dev, struct partition, mbd);
geo->heads = 1;
geo->sectors = SECTORS_PER_TRACK;
@@ -720,7 +748,8 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct partition *part;
- if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
+ if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
+ mtd->size > UINT_MAX)
return;
part = kzalloc(sizeof(struct partition), GFP_KERNEL);
@@ -754,7 +783,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
mtd->name, mtd->type, mtd->flags);
- if (!add_mtd_blktrans_dev((void*)part))
+ if (!add_mtd_blktrans_dev(&part->mbd))
return;
}
out:
@@ -763,7 +792,7 @@ out:
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
- struct partition *part = (struct partition*)dev;
+ struct partition *part = container_of(dev, struct partition, mbd);
int i;
for (i=0; i<part->total_blocks; i++) {
@@ -771,10 +800,10 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
part->mbd.mtd->name, i, part->blocks[i].erases);
}
- del_mtd_blktrans_dev(dev);
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
+ del_mtd_blktrans_dev(&part->mbd);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
@@ -785,6 +814,7 @@ static struct mtd_blktrans_ops rfd_ftl_tr = {
.readsect = rfd_ftl_readsect,
.writesect = rfd_ftl_writesect,
+ .discard = rfd_ftl_discardsect,
.getgeo = rfd_ftl_getgeo,
.add_mtd = rfd_ftl_add_mtd,
.remove_dev = rfd_ftl_remove_dev,
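The (struct partition *)dev casts removed above only worked because struct partition happens to place its mtd_blktrans_dev first; the same fix appears in ftl.c earlier in this series. container_of() derives the enclosing structure from a member pointer at any offset, so the layout assumption disappears. A sketch (partition_example is a hypothetical stand-in):

    #include <linux/kernel.h>   /* container_of() */

    struct partition_example {
            int sector_count;
            struct mtd_blktrans_dev mbd;    /* need not be the first member */
    };

    static int example_readsect(struct mtd_blktrans_dev *dev)
    {
            struct partition_example *part =
                    container_of(dev, struct partition_example, mbd);

            return part->sector_count;
    }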
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a4a202b9a0a2..6006c2e8fa2b 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -96,7 +96,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params bond_params);
+static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
@@ -1298,7 +1298,7 @@ static void ad_tx_machine(struct port *port)
*
* Turn ntt flag on periodically to perform periodic transmission of lacpdu's.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params bond_params)
+static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
{
periodic_states_t last_state;
@@ -1308,7 +1308,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params bond_param
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
(!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params.lacp_active) {
+ !bond_params->lacp_active) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -2342,7 +2342,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, bond->params);
+ ad_periodic_machine(port, &bond->params);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0966e733926..77dc79a7f574 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2169,7 +2169,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
res = -EOPNOTSUPP;
goto err_sysfs_del;
}
- } else {
+ } else if (bond->xdp_prog) {
struct netdev_bpf xdp = {
.command = XDP_SETUP_PROG,
.flags = 0,
@@ -2910,9 +2910,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* probe to generate any traffic (arp_validate=0)
*/
if (bond->params.arp_validate)
- net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
- bond->dev->name,
- &targets[i]);
+ pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
bond_arp_send(slave, ARPOP_REQUEST, targets[i],
0, tags);
continue;
@@ -5224,13 +5224,12 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
bpf_prog_inc(prog);
}
- if (old_prog)
- bpf_prog_put(old_prog);
-
- if (prog)
+ if (prog) {
static_branch_inc(&bpf_master_redirect_enabled_key);
- else
+ } else if (old_prog) {
+ bpf_prog_put(old_prog);
static_branch_dec(&bpf_master_redirect_enabled_key);
+ }
return 0;
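The bond_xdp_set() change makes the static-branch accounting symmetric: the key is incremented once per installed program and decremented only when an existing program is actually removed, instead of putting old_prog unconditionally. The jump-label API involved, as a minimal sketch:

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(redirect_enabled_key);

    static void prog_attach(void)
    {
            static_branch_inc(&redirect_enabled_key);   /* one count per user */
    }

    static void prog_detach(void)
    {
            static_branch_dec(&redirect_enabled_key);   /* drop that count */
    }

    static bool fast_path_enabled(void)
    {
            return static_branch_unlikely(&redirect_enabled_key);
    }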
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index cd5f07fca2a5..377c7d2e7612 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -15,10 +15,8 @@ static void c_can_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct c_can_priv *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->device);
-
strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
}
static void c_can_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index c47988d3674e..ff9d0f5ae0dd 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -2017,7 +2017,7 @@ static int __maybe_unused rcar_canfd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
-static const struct of_device_id rcar_canfd_of_table[] = {
+static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
{ .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
{ }
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index bd1417a66cbf..604f54112665 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1144,7 +1144,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
u8 reg, val, off;
/* Override the port settings */
- if (port == dev->cpu_port) {
+ if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
} else {
@@ -1168,7 +1168,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
u8 reg, val, off;
/* Override the port settings */
- if (port == dev->cpu_port) {
+ if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
} else {
@@ -1236,7 +1236,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
b53_force_link(dev, port, phydev->link);
if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
- if (port == 8)
+ if (port == dev->imp_port)
off = B53_RGMII_CTRL_IMP;
else
off = B53_RGMII_CTRL_P(port);
@@ -2280,6 +2280,7 @@ struct b53_chip_data {
const char *dev_name;
u16 vlans;
u16 enabled_ports;
+ u8 imp_port;
u8 cpu_port;
u8 vta_regs[3];
u8 arl_bins;
@@ -2304,6 +2305,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 2,
.arl_buckets = 1024,
+ .imp_port = 5,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2314,6 +2316,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 2,
.arl_buckets = 1024,
+ .imp_port = 5,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2324,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2337,6 +2341,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2350,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2363,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x7f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2377,6 +2384,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2389,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0xff,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2402,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2415,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0, /* pdata must provide them */
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
@@ -2428,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2441,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2454,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2467,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2480,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2493,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2506,6 +2523,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x103,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2520,6 +2538,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 256,
+ .imp_port = 8,
.cpu_port = 8, /* TODO: ports 4, 5, 8 */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2533,6 +2552,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2546,6 +2566,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 256,
+ .imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2571,6 +2592,7 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[1] = chip->vta_regs[1];
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
+ dev->imp_port = chip->imp_port;
dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
@@ -2612,9 +2634,10 @@ static int b53_switch_init(struct b53_device *dev)
dev->cpu_port = 5;
}
- /* cpu port is always last */
- dev->num_ports = dev->cpu_port + 1;
dev->enabled_ports |= BIT(dev->cpu_port);
+ dev->num_ports = fls(dev->enabled_ports);
+
+ dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
/* Include non standard CPU port built-in PHYs to be probed */
if (is539x(dev) || is531x5(dev)) {
@@ -2660,7 +2683,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
return NULL;
ds->dev = base;
- ds->num_ports = DSA_MAX_PORTS;
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 9bf8319342b0..5d068acf7cf8 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -123,6 +123,7 @@ struct b53_device {
/* used ports mask */
u16 enabled_ports;
+ unsigned int imp_port;
unsigned int cpu_port;
/* connect specific data */
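With a separate imp_port, b53_switch_init() can no longer assume the CPU port is the highest-numbered one, so num_ports is now derived from the enabled-port mask via fls(). The arithmetic, as a small sketch (count_ports is a hypothetical helper):

    #include <linux/bitops.h>

    /* e.g. enabled_ports = 0x1bf -> ports 0-5, 7 and 8 -> fls() == 9 ports */
    static unsigned int count_ports(u16 enabled_ports, unsigned int cpu_port)
    {
            enabled_ports |= BIT(cpu_port);
            return fls(enabled_ports);      /* 1-based index of highest set bit */
    }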
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index e78026ef6d8c..64d6dfa83122 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -843,7 +843,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
+ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 17c16333a412..7b0ae9efc004 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2786,7 +2786,7 @@ static void
dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
- struct vortex_private *vp = netdev_priv(dev);
+ struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a705e2615307..8c83973adca5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8038,9 +8038,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
+ unsigned int len;
int rc, i, j;
u8 *data;
- unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -8057,38 +8057,21 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
for (i = 0; i < BNX2_VPD_LEN; i += 4)
swab32s((u32 *)&data[i]);
- i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto vpd_done;
-
- rosize = pci_vpd_lrdt_size(&data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- block_end = i + rosize;
-
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
-
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&data[j], "1028", 4))
+ if (len != 4 || memcmp(&data[j], "1028", 4))
goto vpd_done;
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+ if (len > BNX2_MAX_VER_SLEN)
goto vpd_done;
memcpy(bp->fw_version, &data[j], len);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d04994840b87..e789430f407c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2407,7 +2407,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
#define VF_ACQUIRE_THRESH 3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6d98134913cd..ae87296ae1ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12189,86 +12189,35 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
- int cnt, i, block_end, rodi;
- char vpd_start[BNX2X_VPD_LEN+1];
- char str_id_reg[VENDOR_ID_LEN+1];
- char str_id_cap[VENDOR_ID_LEN+1];
- char *vpd_data;
- char *vpd_extended_data = NULL;
- u8 len;
-
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
- memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
-
- if (cnt < BNX2X_VPD_LEN)
- goto out_not_found;
-
- /* VPD RO tag should be first tag after identifier string, hence
- * we should be able to find it in first BNX2X_VPD_LEN chars
- */
- i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto out_not_found;
-
- block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_start[i]);
-
- i += PCI_VPD_LRDT_TAG_SIZE;
-
- if (block_end > BNX2X_VPD_LEN) {
- vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
- if (vpd_extended_data == NULL)
- goto out_not_found;
-
- /* read rest of vpd image into vpd_extended_data */
- memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
- cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
- block_end - BNX2X_VPD_LEN,
- vpd_extended_data + BNX2X_VPD_LEN);
- if (cnt < (block_end - BNX2X_VPD_LEN))
- goto out_not_found;
- vpd_data = vpd_extended_data;
- } else
- vpd_data = vpd_start;
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
- /* now vpd_data holds full vpd content in both cases */
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (rodi < 0)
- goto out_not_found;
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
- if (len != VENDOR_ID_LEN)
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
/* vendor specific info */
- snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
- snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
- if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
- !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (rodi >= 0) {
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
- memcpy(bp->fw_ver, &vpd_data[rodi], len);
- bp->fw_ver[len] = ' ';
- }
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
}
- kfree(vpd_extended_data);
- return;
}
out_not_found:
- kfree(vpd_extended_data);
- return;
+ kfree(vpd_data);
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
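The bnx2, bnx2x and (below) bnxt hunks all migrate from hand-rolled VPD tag walking to pci_vpd_alloc() plus pci_vpd_find_ro_info_keyword(), which fetch the whole VPD image and locate a read-only keyword in one call each. The consumer pattern the drivers converge on, sketched with a hypothetical read_part_number() helper:

    #include <linux/pci.h>

    static void read_part_number(struct pci_dev *pdev, char *out, size_t outlen)
    {
            unsigned int vpd_len, kw_len;
            u8 *vpd;
            int pos;

            memset(out, 0, outlen);
            vpd = pci_vpd_alloc(pdev, &vpd_len);    /* kmalloc'd full image */
            if (IS_ERR(vpd))
                    return;

            pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
                                               PCI_VPD_RO_KEYWORD_PARTNO,
                                               &kw_len);
            if (pos >= 0)
                    memcpy(out, &vpd[pos], min_t(size_t, kw_len, outlen - 1));

            kfree(vpd);
    }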
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 627f85ee3922..ea0c45d33814 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -305,13 +305,15 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ (db)->doorbell)
#define BNXT_DB_CQ_ARM(db, idx) \
writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
+ (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
@@ -332,8 +334,8 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5)
- writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
- db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ RING_CMP(idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -2200,25 +2202,34 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!fw_health)
goto async_event_process_exit;
- fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled) {
+ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
+ fw_health->enabled = false;
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[0]\n");
break;
}
+ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
- fw_health->last_fw_heartbeat =
- bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- fw_health->last_fw_reset_cnt =
- bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (!fw_health->enabled) {
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ if (!fw_health->enabled) {
+ /* Make sure tmr_counter is set and visible to
+ * bnxt_health_check() before setting enabled to true.
+ */
+ smp_wmb();
+ fw_health->enabled = true;
+ }
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
@@ -2638,8 +2649,8 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
- writeq(db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
cpr2->had_work_done = 0;
}
}
@@ -4639,6 +4650,13 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
struct hwrm_tunnel_dst_port_free_input *req;
int rc;
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
+ bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
+ bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+
rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
if (rc)
return rc;
@@ -4648,10 +4666,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
switch (tunnel_type) {
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+ bp->vxlan_port = 0;
bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
break;
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+ bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
default:
@@ -4689,10 +4709,12 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
switch (tunnel_type) {
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
+ bp->vxlan_port = port;
bp->vxlan_fw_dst_port_id =
le16_to_cpu(resp->tunnel_dst_port_id);
break;
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
+ bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
default:
@@ -8221,12 +8243,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
- if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -11247,6 +11267,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* Make sure it is enabled before checking the tmr_counter. */
+ smp_rmb();
if (fw_health->tmr_counter) {
fw_health->tmr_counter--;
return;
@@ -12625,13 +12647,10 @@ static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
unsigned int cmd;
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
- bp->vxlan_port = ti.port;
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- } else {
- bp->nge_port = ti.port;
+ else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- }
if (ti.port)
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
@@ -13081,66 +13100,35 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-#define BNXT_VPD_LEN 512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size, size;
- ssize_t vpd_size;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
u8 *vpd_data;
- vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
- if (!vpd_data)
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
return;
-
- vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
- if (vpd_size <= 0) {
- netdev_err(bp->dev, "Unable to read VPD\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
}
- ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- if (i + ro_size > vpd_size)
- goto exit;
-
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
if (pos < 0)
goto read_sn;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto read_sn;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_SERIALNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
if (pos < 0)
goto exit;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto exit;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
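The two barriers added in this file pair with each other: the async-event handler publishes tmr_counter before setting fw_health->enabled, and bnxt_fw_health_check() re-checks enabled before trusting the counter. The generic shape of the smp_wmb()/smp_rmb() pairing, sketched with hypothetical names:

    #include <linux/types.h>
    #include <asm/barrier.h>

    struct fw_state {
            u32 tmr_counter;
            bool enabled;
    };

    /* writer side, e.g. an event handler */
    static void publish(struct fw_state *s, u32 n)
    {
            s->tmr_counter = n;
            smp_wmb();              /* counter must be visible before the flag */
            s->enabled = true;
    }

    /* reader side, e.g. a periodic health check */
    static u32 consume(struct fw_state *s)
    {
            if (!s->enabled)
                    return 0;
            smp_rmb();              /* pairs with the writer's smp_wmb() */
            return s->tmr_counter;  /* sees the value published above */
    }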
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a8212dcdad5f..ec046e7a2484 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -28,6 +28,7 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#ifdef CONFIG_TEE_BNXT_FW
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
@@ -1981,7 +1982,7 @@ struct bnxt {
struct mutex sriov_lock;
#endif
-#ifndef writeq
+#if BITS_PER_LONG == 32
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
@@ -2110,24 +2111,36 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
-#ifndef writeq
-#define writeq(val64, db) \
-do { \
- spin_lock(&bp->db_lock); \
- writel((val64) & 0xffffffff, db); \
- writel((val64) >> 32, (db) + 4); \
- spin_unlock(&bp->db_lock); \
-} while (0)
+static inline void bnxt_writeq(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
-#define writeq_relaxed writeq
+static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq_relaxed(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq_relaxed(val, addr);
#endif
+}
/* For TX and RX ring doorbells with no ordering guarantee*/
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
@@ -2142,7 +2155,7 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 1423cc617d93..9576547df4ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -352,13 +352,16 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
-static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
- union devlink_param_value *nvm_cfg_ver)
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
{
struct hwrm_nvm_get_variable_input *req;
+ u16 bytes = BNXT_NVM_CFG_VER_BYTES;
+ u16 bits = BNXT_NVM_CFG_VER_BITS;
+ union devlink_param_value ver;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int rc;
+ int rc, i = 2;
+ u16 dim = 1;
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
@@ -370,16 +373,34 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
goto exit;
}
+ /* earlier devices present as an array of raw bytes */
+ if (!BNXT_CHIP_P5(bp)) {
+ dim = 0;
+ i = 0;
+ bits *= 3; /* array of 3 version components */
+ bytes *= 4; /* copy whole word */
+ }
+
hwrm_req_hold(bp, req);
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+ req->data_len = cpu_to_le16(bits);
req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+ req->dimensions = cpu_to_le16(dim);
- rc = hwrm_req_send_silent(bp, req);
- if (!rc)
- bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
- BNXT_NVM_CFG_VER_BITS,
- BNXT_NVM_CFG_VER_BYTES);
+ while (i >= 0) {
+ req->index_0 = cpu_to_le16(i--);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto exit;
+ bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
+
+ if (BNXT_CHIP_P5(bp)) {
+ *nvm_cfg_ver <<= 8;
+ *nvm_cfg_ver |= ver.vu8;
+ } else {
+ *nvm_cfg_ver = ver.vu32;
+ }
+ }
exit:
hwrm_req_drop(bp, req);
@@ -416,12 +437,12 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
{
struct hwrm_nvm_get_dev_info_output nvm_dev_info;
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
- union devlink_param_value nvm_cfg_ver;
struct hwrm_ver_get_output *ver_resp;
char mgmt_ver[FW_VER_STR_LEN];
char roce_ver[FW_VER_STR_LEN];
char ncsi_ver[FW_VER_STR_LEN];
char buf[32];
+ u32 ver = 0;
int rc;
rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
@@ -456,7 +477,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
ver_resp = &bp->ver_resp;
- sprintf(buf, "%X", ver_resp->chip_rev);
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
if (rc)
@@ -475,11 +496,9 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
- u32 ver = nvm_cfg_ver.vu32;
-
- sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
- ver & 0xf);
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &ver)) {
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
+ ver & 0xff);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);
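For P5 devices the loop above issues one HWRM read per component, starting at index 2, and accumulates the bytes into a u32. A hedged standalone sketch of that packing, with hypothetical component inputs standing in for the per-index reads:

	/* Sketch: pack three 8-bit components into one u32 the way the P5
	 * loop above does; the first component shifted in ends up in the
	 * high byte. a, b, c are hypothetical inputs, not HWRM reads.
	 */
	static u32 sketch_pack_ver(u8 a, u8 b, u8 c)
	{
		u8 comp[3] = { a, b, c };
		u32 ver = 0;
		int i;

		for (i = 0; i < 3; i++) {
			ver <<= 8;
			ver |= comp[i];
		}
		/* formatted later as "%d.%d.%d" from (ver >> 16) & 0xff,
		 * (ver >> 8) & 0xff and ver & 0xff
		 */
		return ver;
	}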
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d22cab5d6856..d889f240da2b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -40,8 +40,8 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_ENABLE_SRIOV 401
#define NVM_OFF_NVM_CFG_VER 602
-#define BNXT_NVM_CFG_VER_BITS 24
-#define BNXT_NVM_CFG_VER_BYTES 4
+#define BNXT_NVM_CFG_VER_BITS 8
+#define BNXT_NVM_CFG_VER_BYTES 1
#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index acef61abe35d..bb7327b82d0b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -145,11 +145,11 @@ void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
* @bp: The driver context.
* @req: The request for which calls to hwrm_req_dma_slice() will have altered
* allocation flags.
- * @flags: A bitmask of GFP flags. These flags are passed to
- * dma_alloc_coherent() whenever it is used to allocate backing memory
- * for slices. Note that calls to hwrm_req_dma_slice() will not always
- * result in new allocations, however, memory suballocated from the
- * request buffer is already __GFP_ZERO.
+ * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
+ * whenever it is used to allocate backing memory for slices. Note that
+ * calls to hwrm_req_dma_slice() will not always result in new allocations,
+ * however, memory suballocated from the request buffer is already
+ * __GFP_ZERO.
*
* Sets the GFP allocation flags associated with the request for subsequent
* calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
@@ -698,8 +698,8 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req)
* @bp: The driver context.
* @req: The request for which indirect data will be associated.
* @size: The size of the allocation.
- * @dma: The bus address associated with the allocation. The HWRM API has no
- * knowledge about the type of the request and so cannot infer how the
+ * @dma_handle: The bus address associated with the allocation. The HWRM API has
+ * no knowledge about the type of the request and so cannot infer how the
* caller intends to use the indirect data. Thus, the caller is
* responsible for configuring the request object appropriately to
* point to the associated indirect memory. Note, DMA handle has the
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8a238e349e02..5e0e0e70d801 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12788,7 +12788,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
-static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
int i;
__be32 *buf;
@@ -12822,15 +12822,11 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = TG3_NVM_VPD_OFF;
len = TG3_NVM_VPD_LEN;
}
- } else {
- len = TG3_NVM_PCI_VPD_MAX_LEN;
- }
- buf = kmalloc(len, GFP_KERNEL);
- if (buf == NULL)
- return NULL;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
- if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < len; i += 4) {
/* The data is in little-endian format in NVRAM.
* Use the big-endian read routines to preserve
@@ -12841,12 +12837,9 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
}
*vpdlen = len;
} else {
- ssize_t cnt;
-
- cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
- if (cnt < 0)
- goto error;
- *vpdlen = cnt;
+ buf = pci_vpd_alloc(tp->pdev, vpdlen);
+ if (IS_ERR(buf))
+ return NULL;
}
return buf;
@@ -12868,9 +12861,10 @@ error:
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 csum, magic, len;
+ u32 csum, magic;
__be32 *buf;
int i, j, k, err = 0, size;
+ unsigned int len;
if (tg3_flag(tp, NO_NVRAM))
return 0;
@@ -13013,33 +13007,10 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
- if (i > 0) {
- j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
- if (j < 0)
- goto out;
-
- if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
- goto out;
-
- i += PCI_VPD_LRDT_TAG_SIZE;
- j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
- PCI_VPD_RO_KEYWORD_CHKSUM);
- if (j > 0) {
- u8 csum8 = 0;
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- for (i = 0; i <= j; i++)
- csum8 += ((u8 *)buf)[i];
-
- if (csum8)
- goto out;
- }
- }
-
- err = 0;
-
+ err = pci_vpd_check_csum(buf, len);
+ /* go on if no checksum found */
+ if (err == 1)
+ err = 0;
out:
kfree(buf);
return err;
@@ -15624,64 +15595,36 @@ skip_phy_reset:
static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
- unsigned int block_end, rosize, len;
- u32 vpdlen;
- int j, i = 0;
+ unsigned int len, vpdlen;
+ int i;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (i < 0)
- goto out_not_found;
-
- rosize = pci_vpd_lrdt_size(&vpd_data[i]);
- block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
- i += PCI_VPD_LRDT_TAG_SIZE;
+ goto partno;
- if (block_end > vpdlen)
- goto out_not_found;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (j > 0) {
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&vpd_data[j], "1028", 4))
- goto partno;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (j < 0)
- goto partno;
+ if (len != 4 || memcmp(vpd_data + i, "1028", 4))
+ goto partno;
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end)
- goto partno;
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_VENDOR0, &len);
+ if (i < 0)
+ goto partno;
- if (len >= sizeof(tp->fw_ver))
- len = sizeof(tp->fw_ver) - 1;
- memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
- snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
- &vpd_data[j]);
- }
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
partno:
- i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_PARTNO, &len);
if (i < 0)
goto out_not_found;
- len = pci_vpd_info_field_size(&vpd_data[i]);
-
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len > TG3_BPN_SIZE ||
- (len + i) > vpdlen)
+ if (len > TG3_BPN_SIZE)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
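The tg3 changes above drop the open-coded LRDT tag walking in favor of the PCI core VPD helpers, which return an offset into the buffer and the field length. A minimal, hedged usage sketch of that API (pci_vpd_alloc() and pci_vpd_find_ro_info_keyword() are the in-tree helpers; the surrounding function is illustrative):

	static void sketch_show_pn(struct pci_dev *pdev)
	{
		unsigned int vpd_len, kw_len;
		u8 *vpd;
		int off;

		vpd = pci_vpd_alloc(pdev, &vpd_len);	/* reads the whole VPD */
		if (IS_ERR(vpd))
			return;

		off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
						   PCI_VPD_RO_KEYWORD_PARTNO,
						   &kw_len);
		if (off >= 0)	/* offset in, field length out */
			pci_info(pdev, "PN: %.*s\n", kw_len, vpd + off);

		kfree(vpd);
	}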
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 46ec4fdfd16a..1000c894064f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,7 +2101,6 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
-#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c6fe0f2a4d0e..f6396ac64006 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -526,7 +526,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
- if (oct->pci_dev->reset_fn)
+ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
octeon_pci_flr(oct);
else
cn23xx_vf_ask_pf_to_do_flr(oct);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 73c016166f06..d246eee4b6d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1111,6 +1111,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->registered_device_map) {
pr_err("%s: could not register any net devices\n",
pci_name(pdev));
+ err = -EINVAL;
goto out_release_adapter_res;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e21a2e691382..c3afec1041f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -3301,6 +3301,9 @@ void t3_sge_stop(struct adapter *adap)
t3_sge_stop_dma(adap);
+ /* workqueues aren't initialized otherwise */
+ if (!(adap->flags & FULL_INIT_DONE))
+ return;
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *qs = &adap->sge.qs[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9058f09f921e..ecea3cdd30b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,6 @@ extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
PN_LEN = 16, /* Part Number length */
MACADDR_LEN = 12, /* MAC Address length */
@@ -391,7 +390,6 @@ struct tp_params {
struct vpd_params {
unsigned int cclk;
- u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 6606fb8b3e42..64144b6171d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2743,10 +2743,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, ret = 0, addr;
- int ec, sn, pn, na;
- u8 *vpd, csum, base_val = 0;
- unsigned int vpdr_len, kw_offset, id_len;
+ unsigned int id_len, pn_len, sn_len, na_len;
+ int id, sn, pn, na, addr, ret = 0;
+ u8 *vpd, base_val = 0;
vpd = vmalloc(VPD_LEN);
if (!vpd)
@@ -2765,74 +2764,52 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (ret < 0)
goto out;
- if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
- dev_err(adapter->pdev_dev, "missing VPD ID string\n");
- ret = -EINVAL;
+ ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
+ if (ret < 0)
goto out;
- }
+ id = ret;
- id_len = pci_vpd_lrdt_size(vpd);
- if (id_len > ID_LEN)
- id_len = ID_LEN;
-
- i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ ret = pci_vpd_check_csum(vpd, VPD_LEN);
+ if (ret) {
+ dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
ret = -EINVAL;
goto out;
}
- vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
- kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
- if (vpdr_len + kw_offset > VPD_LEN) {
- dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
+ if (ret < 0)
goto out;
- }
-
-#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
- if (var < 0) { \
- dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
- ret = -EINVAL; \
- goto out; \
- } \
- var += PCI_VPD_INFO_FLD_HDR_SIZE; \
-} while (0)
-
- FIND_VPD_KW(i, "RV");
- for (csum = 0; i >= 0; i--)
- csum += vpd[i];
+ sn = ret;
- if (csum) {
- dev_err(adapter->pdev_dev,
- "corrupted VPD EEPROM, actual csum %u\n", csum);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
+ if (ret < 0)
goto out;
- }
+ pn = ret;
- FIND_VPD_KW(ec, "EC");
- FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(pn, "PN");
- FIND_VPD_KW(na, "NA");
-#undef FIND_VPD_KW
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
+ if (ret < 0)
+ goto out;
+ na = ret;
- memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+ memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->ec, vpd + ec, EC_LEN);
- strim(p->ec);
- i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
strim(p->sn);
- i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
strim((char *)p->na);
out:
vfree(vpd);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error reading VPD\n");
+ return ret;
+ }
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index dac1764ba740..5bdf731d9503 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -38,7 +38,7 @@ config CS89x0_ISA
config CS89x0_PLATFORM
tristate "CS89x0 platform driver support"
- depends on ARM || COMPILE_TEST
+ depends on ARM || (COMPILE_TEST && !PPC)
select CS89x0
help
Say Y to compile the cs89x0 platform driver. This makes this driver
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 474c6d1664e7..ac9b69513332 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -362,7 +362,7 @@ static void hclge_set_default_capability(struct hclge_dev *hdev)
}
}
-const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
+static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
{HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
{HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
{HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 59772b0e9531..f89bfb352adf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -342,7 +342,7 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
-const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
+static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
{HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
{HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
{HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 96c6f4f36904..48e001881c75 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -196,7 +196,7 @@ out_free_netdev:
return retval;
}
-static int __exit lan_remove_chip(struct parisc_device *pdev)
+static void __exit lan_remove_chip(struct parisc_device *pdev)
{
struct net_device *dev = parisc_get_drvdata(pdev);
struct i596_private *lp = netdev_priv(dev);
@@ -205,7 +205,6 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
dma_free_noncoherent(&pdev->dev, sizeof(struct i596_private), lp->dma,
lp->dma_addr, DMA_BIDIRECTIONAL);
free_netdev (dev);
- return 0;
}
static const struct parisc_device_id lan_tbl[] __initconst = {
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 893e0ddcb611..0696f723228a 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -314,7 +314,7 @@ static int __init sun3_82586_probe(void)
err = register_netdev(dev);
if (err)
goto out2;
- return dev;
+ return 0;
out2:
release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7f3d01059e19..34a089b71e55 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1487,7 +1487,7 @@ static int cgx_lmac_init(struct cgx *cgx)
MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
- return err;
+ goto err_name_free;
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
@@ -1497,7 +1497,7 @@ static int cgx_lmac_init(struct cgx *cgx)
spin_lock_init(&lmac->event_cb_lock);
err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
if (err)
- goto err_irq;
+ goto err_bitmap_free;
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
@@ -1507,7 +1507,9 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
-err_irq:
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
kfree(lmac->name);
err_lmac_free:
kfree(lmac);
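The cgx fix above adds a label so the bitmap allocated mid-way is also released, restoring the usual kernel unwind convention: each label frees exactly what was acquired before the jump to it, in reverse order. A hedged sketch of the pattern with hypothetical helpers:

	/* Sketch only: alloc_a/alloc_b/setup_c/free_* are illustrative. */
	static int sketch_init(void)
	{
		void *a, *b;

		a = alloc_a();
		if (!a)
			return -ENOMEM;

		b = alloc_b();
		if (!b)
			goto err_free_a;

		if (setup_c())		/* a later failure... */
			goto err_free_b;	/* ...unwinds everything before it */

		return 0;

	err_free_b:
		free_b(b);
	err_free_a:
		free_a(a);
		return -EINVAL;
	}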
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ce647e037f4d..35836903b7fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -92,7 +92,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
- unsigned long timeout = jiffies + usecs_to_jiffies(10000);
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
void __iomem *reg;
u64 reg_val;
@@ -107,6 +108,15 @@ again:
usleep_range(1, 5);
goto again;
}
+ /* In scenarios where the CPU is scheduled out before checking
+ * 'time_before' (above) and is scheduled back in only after jiffies
+ * have moved past the timeout value, check once more whether HW
+ * completed the operation in the meantime.
+ */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
return -EBUSY;
}
@@ -201,6 +211,11 @@ int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
return 0;
}
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d38e5c980c30..1d9411232f1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -638,6 +638,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 3cc76f14d2fd..95f21dfdba48 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -27,7 +27,8 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
struct lmtst_tbl_setup_req *req;
- int qcount, err;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
pfvf->hw_ops = &otx2_hw_ops;
@@ -35,15 +36,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
pfvf->hw_ops = &cn10k_hw_ops;
- qcount = pfvf->hw.max_queues;
- /* LMTST lines allocation
- * qcount = num_online_cpus();
- * NPA = TX + RX + XDP.
- * NIX = TX * 32 (For Burst SQE flush).
- */
- pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
- pfvf->npa_lmt_lines = qcount * 3;
- pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
+ /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush). */
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
@@ -66,6 +61,13 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
+ }
+
return 0;
}
EXPORT_SYMBOL(cn10k_lmtst_init);
@@ -74,13 +76,6 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
- struct otx2_snd_queue *sq;
-
- sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
- (qidx * pfvf->nix_lmt_size));
-
- sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
@@ -125,8 +120,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
if (num_ptrs--)
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
break;
}
cq->pool_ptrs--;
@@ -134,8 +128,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
num_ptrs = 1;
}
}
@@ -143,20 +136,23 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
+ struct otx2_lmt_info *lmt_info;
+ struct otx2_nic *pfvf = dev;
u64 val = 0, tar_addr = 0;
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (sq->lmt_id & 0x7FF);
+ val = (lmt_info->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
*/
tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
dma_wmb();
- memcpy(sq->lmt_addr, sq->sqe_base, size);
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
cn10k_lmt_flush(val, tar_addr);
sq->head++;
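The cn10k rework above moves LMT line ownership from per-queue to per-CPU state: one region per possible CPU, looked up at use time by smp_processor_id(). A hedged sketch of that allocation pattern, with the line-size constants taken as assumptions for illustration:

	/* Sketch only: structure and constants mirror the driver loosely. */
	struct sketch_lmt_info {
		u64 lmt_addr;
		u16 lmt_id;
	};

	static struct sketch_lmt_info __percpu *infos;

	static int sketch_lmt_init(u64 lmt_base)
	{
		int cpu;

		infos = alloc_percpu(struct sketch_lmt_info);
		if (!infos)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct sketch_lmt_info *li = per_cpu_ptr(infos, cpu);

			li->lmt_addr = lmt_base + cpu * 32 * 128; /* 32 lines x 128B */
			li->lmt_id = cpu * 32;
		}
		return 0;
	}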
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index ce25c2744435..78df173e6df2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1230,11 +1230,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->rbsize = buf_size;
- /* Set LMTST addr for NPA batch free */
- if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
- pool->lmt_addr = (__force u64 *)((u64)pfvf->hw.npa_lmt_base +
- (pool_id * LMT_LINE_SIZE));
-
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!aq) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 48227cec06ee..a51ecd771d07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -53,6 +53,10 @@ enum arua_mapped_qtypes {
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
+struct otx2_lmt_info {
+ u64 lmt_addr;
+ u16 lmt_id;
+};
/* RSS configuration */
struct otx2_rss_ctx {
u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
@@ -224,8 +228,7 @@ struct otx2_hw {
#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
u64 *lmt_base;
- u64 *npa_lmt_base;
- u64 *nix_lmt_base;
+ struct otx2_lmt_info __percpu *lmt_info;
};
enum vfperm {
@@ -407,17 +410,18 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
*/
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
-#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_95XXN 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
u8 midr = pdev->revision & 0xF0;
return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
- midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
- midr == PCI_REVISION_ID_95XXMM);
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
@@ -562,15 +566,16 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
- u64 *ptrs, u64 num_ptrs,
- u64 *lmt_addr)
+ u64 *ptrs, u64 num_ptrs)
{
+ struct otx2_lmt_info *lmt_info;
u64 size = 0, count_eot = 0;
u64 tar_addr, val = 0;
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
/* LMTID is same as AURA Id */
- val = (aura & 0x7FF) | BIT_ULL(63);
+ val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
/* Set if [127:64] of last 128bit word has a valid pointer */
count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
/* Set AURA ID to free pointer */
@@ -586,7 +591,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
- memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
}
@@ -594,12 +599,11 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
struct otx2_nic *pfvf = dev;
- struct otx2_pool *pool;
u64 ptrs[2];
- pool = &pfvf->qset.pool[aura];
ptrs[1] = buf;
- __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
+ /* Free only one buffer at a time during init and teardown */
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
}
/* Alloc pointer from pool/aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 799486c72177..dbfa3bc39e34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -16,8 +16,8 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_VF_NAME "octeontx2-nicvf"
+#define DRV_NAME "rvu-nicpf"
+#define DRV_VF_NAME "rvu-nicvf"
struct otx2_stat {
char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2f2e8a3d7924..53df7fff92c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1533,14 +1533,6 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
- if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- /* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = pf->hw.lmt_base;
- /* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
- (pf->npa_lmt_lines * LMT_LINE_SIZE));
- }
-
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -2668,6 +2660,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
@@ -2811,6 +2805,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 869de5f59e73..3ff1ad79c001 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -80,7 +80,6 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
- u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
@@ -111,7 +110,6 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u64 *lmt_addr;
u16 rbsize;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index e91b4874a57f..3de1a03839e2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -582,7 +582,10 @@ static int ionic_set_ringparam(struct net_device *netdev,
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
+
+ mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
+ mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
@@ -679,7 +682,9 @@ static int ionic_set_channels(struct net_device *netdev,
return 0;
}
+ mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
+ mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 23c9e196a784..381966e8f557 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1715,7 +1715,6 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
/* Stop and clean the queues before reconfiguration */
- mutex_lock(&lif->queue_lock);
netif_device_detach(lif->netdev);
ionic_stop_queues(lif);
ionic_txrx_deinit(lif);
@@ -1734,8 +1733,7 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
* DOWN and UP to try to reset and clear the issue.
*/
err = ionic_txrx_init(lif);
- mutex_unlock(&lif->queue_lock);
- ionic_link_status_check_request(lif, CAN_SLEEP);
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
netif_device_attach(lif->netdev);
return err;
@@ -1765,9 +1763,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
netdev->mtu = new_mtu;
- return ionic_start_queues_reconfig(lif);
+ err = ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+
+ return err;
}
static void ionic_tx_timeout_work(struct work_struct *ws)
@@ -1783,8 +1785,10 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
if (!netif_running(lif->netdev))
return;
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 7e3a5634c161..25ecfcfa1281 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -318,7 +318,7 @@ void ionic_rx_filter_sync(struct ionic_lif *lif)
if (f->state == IONIC_FILTER_STATE_NEW ||
f->state == IONIC_FILTER_STATE_OLD) {
sync_item = devm_kzalloc(dev, sizeof(*sync_item),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!sync_item)
goto loop_out;
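The GFP_KERNEL to GFP_ATOMIC switch above matters because the filter-sync loop runs with a spinlock held: GFP_KERNEL allocations may sleep, which is forbidden in atomic context. A hedged sketch of the constraint, with hypothetical lock and list names:

	/* Sketch only: item/lock/pending are illustrative. */
	struct sketch_item {
		struct list_head list;
	};

	static void sketch_queue_item(spinlock_t *lock, struct list_head *pending)
	{
		struct sketch_item *item;

		spin_lock_bh(lock);
		/* GFP_KERNEL could sleep here; GFP_ATOMIC never does */
		item = kzalloc(sizeof(*item), GFP_ATOMIC);
		if (item)
			list_add(&item->list, pending);
		spin_unlock_bh(lock);
	}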
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 5a0a3cbcc1c1..cb0f2a3a1ac9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2226,8 +2226,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
break;
default:
- DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
- return -EINVAL;
+ DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
+ return -EOPNOTSUPP;
}
/* Calculate line in ilt */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 3d61a767a8a3..09f20c794754 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -437,7 +437,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
msleep(20);
- qlcnic_rom_unlock(adapter);
/* big hammer don't reset CAM block on reset */
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index 79e50079ed03..f72e13b83869 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -100,7 +100,7 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < EMAC_STATS_LEN; i++) {
- strlcpy(data, emac_ethtool_stat_strings[i],
+ strscpy(data, emac_ethtool_stat_strings[i],
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6c8ba916d1a6..1374faa229a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
else
txdesc->status |= cpu_to_le32(TD_TACT);
+ wmb(); /* cur_tx must be incremented after TACT bit was set */
mdp->cur_tx++;
if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
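The sh_eth wmb() above is the classic publish pattern: the descriptor status store must be visible before the index increment that announces the slot to the completion path. A hedged sketch with fully hypothetical types:

	/* Sketch only: ring/desc/SKETCH_OWN are illustrative. */
	static void sketch_publish(struct sketch_ring *ring,
				   struct sketch_desc *desc)
	{
		desc->status |= cpu_to_le32(SKETCH_OWN);	/* hand slot over */
		wmb();		/* order: status store before the index */
		ring->head++;	/* consumers test the index for live slots */
	}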
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a295e2621cf3..43ef4f529028 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -900,74 +900,36 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* NIC VPD information
* Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * installed NIC.
*/
-#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index c177ea0f301e..423bdf81200f 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2780,75 +2780,36 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
};
/* NIC VPD information
- * Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * Called during probe to display the part number of the installed NIC.
*/
-#define SFC_VPD_LEN 512
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 22cdbf12c823..b008b4e8a2a5 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1550,7 +1550,7 @@ static int smc911x_ethtool_getregslen(struct net_device *dev)
}
static void smc911x_ethtool_getregs(struct net_device *dev,
- struct ethtool_regs* regs, void *buf)
+ struct ethtool_regs *regs, void *buf)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned long flags;
@@ -1600,7 +1600,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
}
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
- int cmd, int addr)
+ int cmd, int addr)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1614,7 +1614,7 @@ static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
}
static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
- u8 *data)
+ u8 *data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1626,7 +1626,7 @@ static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
}
static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
- u8 data)
+ u8 data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1638,7 +1638,7 @@ static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
}
static int smc911x_ethtool_geteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+ struct ethtool_eeprom *eeprom, u8 *data)
{
u8 eebuf[SMC911X_EEPROM_LEN];
int i, ret;
@@ -1654,7 +1654,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
}
static int smc911x_ethtool_seteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+ struct ethtool_eeprom *eeprom, u8 *data)
{
int i, ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 4c9a37dd0d3f..ecf759ee1c9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -109,8 +109,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bus_id = pci_dev_id(pdev);
phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0)
+ if (phy_mode < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
+ return phy_mode;
+ }
plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ed0cd3920171..ece02b35a6ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5347,7 +5347,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
- int rx_done, tx_done;
+ int rx_done, tx_done, rxtx_done;
u32 chan = ch->index;
priv->xstats.napi_poll++;
@@ -5357,14 +5357,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
rx_done = stmmac_rx_zc(priv, budget, chan);
+ rxtx_done = max(tx_done, rx_done);
+
/* If either TX or RX work is not complete, return budget
 * and keep polling
*/
- if (tx_done >= budget || rx_done >= budget)
+ if (rxtx_done >= budget)
return budget;
/* all work done, exit the polling mode */
- if (napi_complete_done(napi, rx_done)) {
+ if (napi_complete_done(napi, rxtx_done)) {
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
@@ -5375,7 +5377,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
- return min(rx_done, budget - 1);
+ return min(rxtx_done, budget - 1);
}
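With one NAPI instance serving both the RX and TX rings, the completion accounting above has to reflect the busier of the two; completing with the smaller count could stop polling while one ring still has work. A hedged sketch of the convention, with a hypothetical IRQ re-arm helper:

	static int sketch_poll_rxtx(struct napi_struct *napi, int budget,
				    int tx_done, int rx_done)
	{
		int done = max(tx_done, rx_done);

		if (done >= budget)
			return budget;	/* either ring still busy: keep polling */

		if (napi_complete_done(napi, done))
			sketch_enable_irqs();	/* hypothetical re-arm */

		return min(done, budget - 1);	/* completed polls stay < budget */
	}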
/**
@@ -7121,8 +7123,6 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- phylink_mac_change(priv->phylink, false);
-
mutex_lock(&priv->lock);
netif_device_detach(ndev);
@@ -7148,14 +7148,6 @@ int stmmac_suspend(struct device *dev)
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- mutex_unlock(&priv->lock);
- rtnl_lock();
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_stop(priv->phylink);
- rtnl_unlock();
- mutex_lock(&priv->lock);
-
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
@@ -7169,6 +7161,16 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_suspend(priv->phylink, true);
+ } else {
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ phylink_suspend(priv->phylink, false);
+ }
+ rtnl_unlock();
+
if (priv->dma_cap.fpesel) {
/* Disable FPE */
stmmac_fpe_configure(priv, priv->ioaddr,
@@ -7259,13 +7261,15 @@ int stmmac_resume(struct device *dev)
return ret;
}
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_resume(priv->phylink);
+ } else {
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_up(priv->phylink);
}
+ rtnl_unlock();
rtnl_lock();
mutex_lock(&priv->lock);
@@ -7286,8 +7290,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
- phylink_mac_change(priv->phylink, true);
-
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index ecece21315c3..39234852e01b 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -16,7 +16,6 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/cpu.h>
-#include <linux/module.h>
#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2cdf9f989dec..a1464b764d4d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -33,6 +33,7 @@
enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
+ PHYLINK_DISABLE_MAC_WOL,
};
/**
@@ -1282,6 +1283,9 @@ EXPORT_SYMBOL_GPL(phylink_start);
* network device driver's &struct net_device_ops ndo_stop() method. The
* network device's carrier state should not be changed prior to calling this
* function.
+ *
+ * This will synchronously bring down the link if the link is not already
+ * down (in other words, it will trigger a mac_link_down() method call).
*/
void phylink_stop(struct phylink *pl)
{
@@ -1302,6 +1306,84 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_suspend() - handle a network device suspend event
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
+ *
+ * Handle a network device suspend event. There are several cases:
+ * - If Wake-on-Lan is not active, we can bring down the link between
+ * the MAC and PHY by calling phylink_stop().
+ * - If Wake-on-Lan is active, and being handled only by the PHY, we
+ * can also bring down the link between the MAC and PHY.
+ * - If Wake-on-Lan is active, but being handled by the MAC, the MAC
+ * still needs to receive packets, so we cannot bring the link down.
+ */
+void phylink_suspend(struct phylink *pl, bool mac_wol)
+{
+ ASSERT_RTNL();
+
+ if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
+ /* Wake-on-Lan enabled, MAC handling */
+ mutex_lock(&pl->state_mutex);
+
+ /* Stop the resolver bringing the link up */
+ __set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
+
+ /* Disable the carrier to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ netif_carrier_off(pl->netdev);
+
+ /* We do not call mac_link_down() here as we want the
+ * link to remain up to receive the WoL packets.
+ */
+ mutex_unlock(&pl->state_mutex);
+ } else {
+ phylink_stop(pl);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_suspend);
+
+/**
+ * phylink_resume() - handle a network device resume event
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Undo the effects of phylink_suspend(), returning the link to an
+ * operational state.
+ */
+void phylink_resume(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
+ /* Wake-on-Lan enabled, MAC handling */
+
+ /* Call mac_link_down() so we keep the overall state balanced.
+ * Do this under the state_mutex lock for consistency. This
+ * will cause a "Link Down" message to be printed during
+ * resume, which is harmless - the true link state will be
+ * printed when we run a resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+
+ /* Re-apply the link parameters so that all the settings get
+ * restored to the MAC.
+ */
+ phylink_mac_initial_config(pl, true);
+
+ /* Re-enable and re-resolve the link parameters */
+ clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
+ phylink_run_resolve(pl);
+ } else {
+ phylink_start(pl);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_resume);
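Callers of the new API hold RTNL and tell phylink whether the MAC itself must keep receiving for Wake-on-Lan, as the stmmac hunks earlier in this diff do. A hedged sketch of a MAC driver's suspend path on these assumptions; priv and its fields are illustrative:

	static int sketch_mac_suspend(struct device *dev,
				      struct sketch_priv *priv)
	{
		rtnl_lock();
		if (device_may_wakeup(dev) && priv->mac_handles_wol) {
			/* keep the link up so WoL frames reach the MAC */
			phylink_suspend(priv->phylink, true);
		} else {
			if (device_may_wakeup(dev))
				phylink_speed_down(priv->phylink, false);
			/* behaves like phylink_stop(): link is taken down */
			phylink_suspend(priv->phylink, false);
		}
		rtnl_unlock();
		return 0;
	}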
+
+/**
* phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 4c4ab7b38d78..82bb5ed94c48 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -654,6 +654,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit LN920 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 7dc1ef3f93c3..a57251ba5991 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2535,13 +2535,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
if (!hso_net->mux_bulk_tx_buf)
goto err_free_tx_urb;
- add_net_device(hso_dev);
+ result = add_net_device(hso_dev);
+ if (result) {
+ dev_err(&interface->dev, "Failed to add net device\n");
+ goto err_free_tx_buf;
+ }
/* registering our net device */
result = register_netdev(net);
if (result) {
dev_err(&interface->dev, "Failed to register device\n");
- goto err_free_tx_buf;
+ goto err_rmv_ndev;
}
hso_log_port(hso_dev);
@@ -2550,8 +2554,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
return hso_dev;
-err_free_tx_buf:
+err_rmv_ndev:
remove_net_device(hso_dev);
+err_free_tx_buf:
kfree(hso_net->mux_bulk_tx_buf);
err_free_tx_urb:
usb_free_urb(hso_net->mux_bulk_tx_urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6a2e4f884b12..33ada2c59952 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1354,6 +1354,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 52d1d391f4c6..d8231cc821ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 65
+#define IWL_22000_UCODE_API_MAX 66
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 314ed90c23dd..dde22bdc8703 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -231,6 +231,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[MAX_PNVM_NAME];
+ size_t new_len;
int ret;
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
@@ -242,11 +243,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
return ret;
}
+ new_len = pnvm->size;
*data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ release_firmware(pnvm);
+
if (!*data)
return -ENOMEM;
- *len = pnvm->size;
+ *len = new_len;
return 0;
}
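The pnvm fix above is a lifetime-ordering change: the firmware buffer is only valid until release_firmware(), so the size must be captured and the data duplicated before the release, and the release must happen even on the error path. A hedged sketch of the pattern:

	/* Sketch only: mirrors the copy-then-release ordering above. */
	static int sketch_copy_fw(const struct firmware *fw, u8 **data,
				  size_t *len)
	{
		size_t n = fw->size;
		u8 *copy = kmemdup(fw->data, fw->size, GFP_KERNEL);

		release_firmware(fw);	/* fw->data is dead past this point */
		if (!copy)
			return -ENOMEM;

		*data = copy;
		*len = n;
		return 0;
	}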
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index a7c79d814aa4..c875bf35533c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -49,14 +49,14 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
if (err) {
IWL_DEBUG_FW(trans,
- "PNVM UEFI variable not found %d (len %zd)\n",
+ "PNVM UEFI variable not found %d (len %lu)\n",
err, package_size);
kfree(data);
data = ERR_PTR(err);
goto out;
}
- IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %zd\n", package_size);
+ IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size);
*len = package_size;
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 6f60018feed1..77ea2d0a3091 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
int ret;
+ rtnl_lock();
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8dc1b8eecb86..61b2797a34a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -558,6 +558,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c
index 6d4b7f64efcf..256946552742 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes.c
+++ b/drivers/net/wireless/intersil/orinoco/hermes.c
@@ -79,7 +79,6 @@
#undef HERMES_DEBUG
#ifdef HERMES_DEBUG
-#include <stdarg.h>
#define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 0d2f10e4cbc8..dc65b0712261 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -7,7 +7,6 @@
#define IOSM_IPC_IMEM_H
#include <linux/skbuff.h>
-#include <stdbool.h>
#include "iosm_ipc_mmio.h"
#include "iosm_ipc_pcie.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index 06c94b1720b6..09f94c123531 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -69,7 +69,7 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
unsigned int ver;
ver = ipc_mmio_get_cp_version(ipc_mmio);
- cp_cap = readl(ipc_mmio->base + ipc_mmio->offset.cp_capability);
+ cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
!(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
@@ -150,8 +150,8 @@ enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio)
if (!ipc_mmio)
return IPC_MEM_EXEC_STAGE_INVALID;
- return (enum ipc_mem_exec_stage)readl(ipc_mmio->base +
- ipc_mmio->offset.exec_stage);
+ return (enum ipc_mem_exec_stage)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.exec_stage);
}
void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
@@ -167,8 +167,8 @@ enum ipc_mem_device_ipc_state ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio)
if (!ipc_mmio)
return IPC_MEM_DEVICE_IPC_INVALID;
- return (enum ipc_mem_device_ipc_state)
- readl(ipc_mmio->base + ipc_mmio->offset.ipc_status);
+ return (enum ipc_mem_device_ipc_state)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.ipc_status);
}
enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
@@ -176,8 +176,8 @@ enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
if (!ipc_mmio)
return IMEM_ROM_EXIT_FAIL;
- return (enum rom_exit_code)readl(ipc_mmio->base +
- ipc_mmio->offset.rom_exit_code);
+ return (enum rom_exit_code)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.rom_exit_code);
}
void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
@@ -188,10 +188,10 @@ void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
/* AP memory window (full window is open and active so that modem checks
* each AP address) 0 means don't check on modem side.
*/
- iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
- iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
+ iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
+ iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
- iowrite64_lo_hi(ipc_mmio->context_info_addr,
+ iowrite64(ipc_mmio->context_info_addr,
ipc_mmio->base + ipc_mmio->offset.context_info);
}
@@ -201,8 +201,8 @@ void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
if (!ipc_mmio)
return;
- iowrite64_lo_hi(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
- writel(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
+ iowrite64(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
+ iowrite32(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
}
void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
@@ -218,6 +218,8 @@ void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio)
{
- return ipc_mmio ? readl(ipc_mmio->base + ipc_mmio->offset.cp_version) :
- -EFAULT;
+ if (ipc_mmio)
+ return ioread32(ipc_mmio->base + ipc_mmio->offset.cp_version);
+
+ return -EFAULT;
}
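The conversion above swaps readl()/writel() for ioread32()/iowrite32() and drops the _lo_hi suffix from the 64-bit writes. ioread32()/iowrite32() also cope with port-mapped tokens returned by pci_iomap(), and plain iowrite64() exists on 32-bit builds only via one of the io-64-nonatomic headers, where it degrades to two 32-bit stores. A sketch under those assumptions:
	#include <linux/io-64-nonatomic-lo-hi.h>	/* supplies iowrite64() on 32-bit */
	#include <linux/io.h>

	static void mmio_sketch(void __iomem *base)
	{
		u32 v;

		v = ioread32(base);		/* 32-bit read */
		iowrite32(v | 0x1, base);	/* 32-bit write */
		iowrite64(0, base + 8);		/* one 64-bit store on 64-bit hosts;
						 * two 32-bit stores (low word first)
						 * with the lo-hi header on 32-bit */
	}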
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 71428d8cbcfc..87847c380051 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1176,22 +1176,14 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
pci_set_master(pdev);
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err_dma_mask;
- dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
- }
-
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
rc = -EIO;
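This and the two NTB conversions below collapse the old four-call setup (pci_set_dma_mask() plus pci_set_consistent_dma_mask(), each with a 32-bit fallback) into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together. A minimal sketch of the resulting idiom:
	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int sketch_set_dma_mask(struct pci_dev *pdev)
	{
		int rc;

		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)		/* fall back to 32-bit addressing */
			rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		return rc;
	}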
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index e7a4c2aa8baa..733557231ed0 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2640,26 +2640,15 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
int ret;
/* Initialize the bit mask of PCI/NTB DMA */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret != 0) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
return ret;
}
dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
}
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret != 0) {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret != 0) {
- dev_err(&pdev->dev,
- "Failed to set consistent DMA bit mask\n");
- return ret;
- }
- dev_warn(&pdev->dev,
- "Cannot set consistent DMA highmem bit mask\n");
- }
/*
* Enable the device advanced error reporting. It's not critical to
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 093dd20057b9..e5f14e20a9ff 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -1771,22 +1771,14 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
pci_set_master(pdev);
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err_dma_mask;
- dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
- }
-
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
rc = -EIO;
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 05e2335c9596..b233d1c6ba2d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -43,9 +43,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
*/
#ifndef NTB_HW_INTEL_H
diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
index 7095ecd6223a..4e18e08776c9 100644
--- a/drivers/ntb/test/ntb_msi_test.c
+++ b/drivers/ntb/test/ntb_msi_test.c
@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
if (ret)
goto remove_dbgfs;
- if (!nm->isr_ctx)
+ if (!nm->isr_ctx) {
+ ret = -ENOMEM;
goto remove_dbgfs;
+ }
ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 89df1350fefd..65e1e5cf1b29 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
return -ENOMEM;
}
if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
+ ret = -EINVAL;
dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
goto err_free_inbuf;
}
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 2164e8492772..8aeca7914050 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -187,7 +187,7 @@ static void pp_ping(struct pp_ctx *pp)
static void pp_pong(struct pp_ctx *pp)
{
- u32 msg_data = -1, spad_data = -1;
+ u32 msg_data, spad_data;
int pidx = 0;
/* Read pong data */
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9251441fd8a3..7f473f9db300 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd,
free, nslot);
}
+static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum, sum_save;
+
+ if (!namespace_label_has(ndd, checksum))
+ return true;
+
+ sum_save = nsl_get_checksum(ndd, nd_label);
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum_save);
+ return sum == sum_save;
+}
+
+static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum;
+
+ if (!namespace_label_has(ndd, checksum))
+ return;
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum);
+}
+
static bool slot_valid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 slot)
{
+ bool valid;
+
/* check that we are written where we expect to be written */
- if (slot != __le32_to_cpu(nd_label->slot))
+ if (slot != nsl_get_slot(ndd, nd_label))
return false;
-
- /* check checksum */
- if (namespace_label_has(ndd, checksum)) {
- u64 sum, sum_save;
-
- sum_save = __le64_to_cpu(nd_label->checksum);
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum_save);
- if (sum != sum_save) {
- dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
- slot, sum);
- return false;
- }
- }
-
- return true;
+ valid = nsl_validate_checksum(ndd, nd_label);
+ if (!valid)
+ dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
+ return valid;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
@@ -395,13 +411,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
continue;
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (test_bit(NDD_NOBLK, &nvdimm->flags))
flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
- __le64_to_cpu(nd_label->dpa),
- __le64_to_cpu(nd_label->rawsize));
+ nsl_get_dpa(ndd, nd_label),
+ nsl_get_rawsize(ndd, nd_label));
nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
if (!res)
return -EBUSY;
@@ -548,9 +564,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot)) {
- u32 label_slot = __le32_to_cpu(nd_label->slot);
- u64 size = __le64_to_cpu(nd_label->rawsize);
- u64 dpa = __le64_to_cpu(nd_label->dpa);
+ u32 label_slot = nsl_get_slot(ndd, nd_label);
+ u64 size = nsl_get_rawsize(ndd, nd_label);
+ u64 dpa = nsl_get_dpa(ndd, nd_label);
dev_dbg(ndd->dev,
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
@@ -708,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- (unsigned long) to_namespace_index(ndd, 0);
}
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
if (guid_equal(guid, &nvdimm_btt_guid))
return NVDIMM_CCLASS_BTT;
@@ -756,6 +772,45 @@ static void reap_victim(struct nd_mapping *nd_mapping,
victim->label = NULL;
}
+static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (namespace_label_has(ndd, type_guid))
+ guid_copy(&nd_label->type_guid, guid);
+}
+
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+ if (!guid_equal(&nd_label->type_guid, guid)) {
+ dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
+ &nd_label->type_guid);
+ return false;
+ }
+ return true;
+}
+
+static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ enum nvdimm_claim_class claim_class)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return;
+ guid_copy(&nd_label->abstraction_guid,
+ to_abstraction_guid(claim_class,
+ &nd_label->abstraction_guid));
+}
+
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return NVDIMM_CCLASS_NONE;
+ return to_nvdimm_cclass(&nd_label->abstraction_guid);
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -797,29 +852,18 @@ static int __pmem_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
- if (nspm->alt_name)
- memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(flags);
- nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
- nd_label->position = __cpu_to_le16(pos);
- nd_label->isetcookie = __cpu_to_le64(cookie);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nspm->alt_name);
+ nsl_set_flags(ndd, nd_label, flags);
+ nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+ nsl_set_position(ndd, nd_label, pos);
+ nsl_set_isetcookie(ndd, nd_label, cookie);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
nd_dbg_dpa(nd_region, ndd, res, "\n");
/* update label */
@@ -879,9 +923,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
struct resource *res;
for_each_dpa_resource(ndd, res) {
- if (res->start != __le64_to_cpu(nd_label->dpa))
+ if (res->start != nsl_get_dpa(ndd, nd_label))
continue;
- if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
+ if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
continue;
return res;
}
@@ -890,6 +934,59 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
}
/*
+ * Use the presence of the type_guid as a flag to determine isetcookie
+ * usage and nlabel + position policy for blk-aperture namespaces.
+ */
+static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (namespace_label_has(ndd, type_guid)) {
+ nsl_set_isetcookie(ndd, nd_label, isetcookie);
+ return;
+ }
+ nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+
+ if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
+ dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
+ nsl_get_isetcookie(ndd, nd_label));
+ return false;
+ }
+
+ return true;
+}
+
+static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, int nlabel,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
+ return;
+ }
+ nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
+}
+
+static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_position(ndd, nd_label, 0);
+ return;
+ }
+ nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
+}
+
+/*
* 1/ Account all the labels that can be freed after this update
* 2/ Allocate and write the label to the staging (next) index
* 3/ Record the resources in the namespace device
@@ -1017,50 +1114,21 @@ static int __blk_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
- if (nsblk->alt_name)
- memcpy(nd_label->name, nsblk->alt_name,
- NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
-
- /*
- * Use the presence of the type_guid as a flag to
- * determine isetcookie usage and nlabel + position
- * policy for blk-aperture namespaces.
- */
- if (namespace_label_has(ndd, type_guid)) {
- if (i == min_dpa_idx) {
- nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
- nd_label->position = __cpu_to_le16(0);
- } else {
- nd_label->nlabel = __cpu_to_le16(0xffff);
- nd_label->position = __cpu_to_le16(0xffff);
- }
- nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
- } else {
- nd_label->nlabel = __cpu_to_le16(0); /* N/A */
- nd_label->position = __cpu_to_le16(0); /* N/A */
- nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
- }
-
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
-
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label,
- sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nsblk->alt_name);
+ nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
+
+ nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
+ i == min_dpa_idx);
+ nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
+ nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);
+
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
/* update label */
offset = nd_label_offset(ndd, nd_label);
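The label.c refactor above centralizes the zero-then-restore checksum dance in nsl_validate_checksum()/nsl_calculate_checksum(): the checksum field must be zero while the Fletcher64 sum is computed over the whole label, and the on-media value is put back afterwards. A standalone sketch of that pattern (sum64() is a placeholder, not Fletcher64):
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct label {
		uint64_t payload[7];
		uint64_t checksum;
	};

	static uint64_t sum64(const void *buf, size_t len)
	{
		const uint8_t *p = buf;
		uint64_t s = 0;

		while (len--)			/* placeholder checksum */
			s = s * 31 + *p++;
		return s;
	}

	static bool label_checksum_ok(struct label *l)
	{
		uint64_t save = l->checksum;
		bool ok;

		l->checksum = 0;		/* field is zero while summing */
		ok = sum64(l, sizeof(*l)) == save;
		l->checksum = save;		/* restore what is on media */
		return ok;
	}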
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 956b6d1bd8cc..31f94fad7b92 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 745478213ff2..4cec171c934d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
if (!nd_label)
continue;
nd_label_gen_id(&label_id, nd_label->uuid,
- __le32_to_cpu(nd_label->flags));
+ nsl_get_flags(ndd, nd_label));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
@@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
u16 position, nlabel;
- u64 isetcookie;
if (!nd_label)
continue;
- isetcookie = __le64_to_cpu(nd_label->isetcookie);
- position = __le16_to_cpu(nd_label->position);
- nlabel = __le16_to_cpu(nd_label->nlabel);
+ position = nsl_get_position(ndd, nd_label);
+ nlabel = nsl_get_nlabel(ndd, nd_label);
- if (isetcookie != cookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
continue;
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
- if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
+ if (!nsl_validate_type_guid(ndd, nd_label,
+ &nd_set->type_guid))
continue;
- }
if (found_uuid) {
dev_dbg(ndd->dev, "duplicate entry for uuid\n");
@@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
*/
hw_start = nd_mapping->start;
hw_end = hw_start + nd_mapping->size;
- pmem_start = __le64_to_cpu(nd_label->dpa);
- pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+ pmem_start = nsl_get_dpa(ndd, nd_label);
+ pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
if (pmem_start >= hw_start && pmem_start < hw_end
&& pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
@@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nd_label: target pmem namespace label to evaluate
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex,
- struct nd_namespace_label *nd_label)
+ struct nd_mapping *nd_mapping,
+ struct nd_namespace_label *nd_label)
{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_index *nsindex =
+ to_namespace_index(ndd, ndd->ns_current);
u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
struct nd_label_ent *label_ent;
struct nd_namespace_pmem *nspm;
- struct nd_mapping *nd_mapping;
resource_size_t size = 0;
struct resource *res;
struct device *dev;
@@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
return ERR_PTR(-ENXIO);
}
- if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
nd_label->uuid);
- if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
return ERR_PTR(-EAGAIN);
dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
@@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
continue;
}
- size += __le64_to_cpu(label0->rawsize);
- if (__le16_to_cpu(label0->position) != 0)
+ ndd = to_ndd(nd_mapping);
+ size += nsl_get_rawsize(ndd, label0);
+ if (nsl_get_position(ndd, label0) != 0)
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
- nspm->alt_name = kmemdup((void __force *) label0->name,
- NSLABEL_NAME_LEN, GFP_KERNEL);
+ nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
+ NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL);
- nspm->lbasize = __le64_to_cpu(label0->lbasize);
- ndd = to_ndd(nd_mapping);
- if (namespace_label_has(ndd, abstraction_guid))
- nspm->nsio.common.claim_class
- = to_nvdimm_cclass(&label0->abstraction_guid);
-
+ nspm->lbasize = nsl_get_lbasize(ndd, label0);
+ nspm->nsio.common.claim_class =
+ nsl_get_claim_class(ndd, label0);
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
if (is_namespace_blk(devs[i])) {
res = nsblk_add_resource(nd_region, ndd,
to_nd_namespace_blk(devs[i]),
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
return -ENXIO;
nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
@@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
struct device *dev = NULL;
struct resource *res;
- if (namespace_label_has(ndd, type_guid)) {
- if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
- return ERR_PTR(-EAGAIN);
- }
-
- if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
- dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
- nd_set->cookie2,
- __le64_to_cpu(nd_label->isetcookie));
- return ERR_PTR(-EAGAIN);
- }
- }
+ if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
+ return ERR_PTR(-EAGAIN);
+ if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
+ return ERR_PTR(-EAGAIN);
nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
if (!nsblk)
@@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
dev->type = &namespace_blk_device_type;
dev->parent = &nd_region->dev;
nsblk->id = -1;
- nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
- nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
- GFP_KERNEL);
- if (namespace_label_has(ndd, abstraction_guid))
- nsblk->common.claim_class
- = to_nvdimm_cclass(&nd_label->abstraction_guid);
+ nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
+ nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+ nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
if (!nsblk->uuid)
goto blk_err;
- memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+ nsl_get_name(ndd, nd_label, name);
if (name[0]) {
- nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
- GFP_KERNEL);
+ nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
if (!nsblk->alt_name)
goto blk_err;
}
res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
goto blk_err;
nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
@@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
struct device *dev, **devs = NULL;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
/* "safe" because create_namespace_pmem() might list_move() label_ent */
@@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (!nd_label)
continue;
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (is_nd_blk(&nd_region->dev)
== !!(flags & NSLABEL_FLAG_LOCAL))
/* pass, region matches label type */;
@@ -2363,9 +2342,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
/* skip labels that describe extents outside of the region */
- if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
- __le64_to_cpu(nd_label->dpa) > map_end)
- continue;
+ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
+ nsl_get_dpa(ndd, nd_label) > map_end)
+ continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
if (i < 0)
@@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (is_nd_blk(&nd_region->dev))
dev = create_namespace_blk(nd_region, nd_label, count);
- else {
- struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_namespace_index *nsindex;
-
- nsindex = to_namespace_index(ndd, ndd->ns_current);
- dev = create_namespace_pmem(nd_region, nsindex, nd_label);
- }
+ else
+ dev = create_namespace_pmem(nd_region, nd_mapping,
+ nd_label);
if (IS_ERR(dev)) {
switch (PTR_ERR(dev)) {
@@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region)
break;
label = nd_label_active(ndd, j);
if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
- u32 flags = __le32_to_cpu(label->flags);
+ u32 flags = nsl_get_flags(ndd, label);
flags &= ~NSLABEL_FLAG_LOCAL;
- label->flags = __cpu_to_le32(flags);
+ nsl_set_flags(ndd, label, flags);
}
label_ent->label = label;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 696b55556d4d..5467ebbb4a6b 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -35,6 +35,156 @@ struct nvdimm_drvdata {
struct kref kref;
};
+static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return nd_label->name;
+}
+
+static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+}
+
+static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ if (!name)
+ return NULL;
+ return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
+}
+
+static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->slot);
+}
+
+static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 slot)
+{
+ nd_label->slot = __cpu_to_le32(slot);
+}
+
+static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->checksum);
+}
+
+static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 checksum)
+{
+ nd_label->checksum = __cpu_to_le64(checksum);
+}
+
+static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->flags);
+}
+
+static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 flags)
+{
+ nd_label->flags = __cpu_to_le32(flags);
+}
+
+static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->dpa);
+}
+
+static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u64 dpa)
+{
+ nd_label->dpa = __cpu_to_le64(dpa);
+}
+
+static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->rawsize);
+}
+
+static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 rawsize)
+{
+ nd_label->rawsize = __cpu_to_le64(rawsize);
+}
+
+static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ nd_label->isetcookie = __cpu_to_le64(isetcookie);
+}
+
+static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 cookie)
+{
+ return cookie == __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->position);
+}
+
+static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 position)
+{
+ nd_label->position = __cpu_to_le16(position);
+}
+
+static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->nlabel);
+}
+
+static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 nlabel)
+{
+ nd_label->nlabel = __cpu_to_le16(nlabel);
+}
+
+static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->lbasize);
+}
+
+static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 lbasize)
+{
+ nd_label->lbasize = __cpu_to_le64(lbasize);
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie);
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid);
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label);
+
struct nd_region_data {
int ns_count;
int ns_active;
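The new nsl_* inlines above keep every __le <-> cpu conversion in one place so label.c and namespace_devs.c never touch the raw on-media fields. The getter/setter shape, reduced to a single hypothetical field:
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_label {
		__le64 example;		/* hypothetical on-media field */
	};

	static inline u64 demo_get_example(struct demo_label *l)
	{
		return __le64_to_cpu(l->example);
	}

	static inline void demo_set_example(struct demo_label *l, u64 v)
	{
		l->example = __cpu_to_le64(v);
	}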
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1e0615b8565e..72de88ff0d30 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -450,11 +450,11 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
+ addr = devm_memremap(dev, pmem->phys_addr,
+ pmem->size, ARCH_MEMREMAP_PMEM);
if (devm_add_action_or_reset(dev, pmem_release_queue,
&pmem->pgmap))
return -ENOMEM;
- addr = devm_memremap(dev, pmem->phys_addr,
- pmem->size, ARCH_MEMREMAP_PMEM);
bb_range.start = res->start;
bb_range.end = res->end;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8679a108f571..7efb31b87f37 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -116,6 +116,8 @@ static struct class *nvme_ns_chr_class;
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
/*
* Prepare a queue for teardown.
@@ -1152,7 +1154,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
@@ -1167,6 +1170,26 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_queue_scan(ctrl);
flush_work(&ctrl->scan_work);
}
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+ case NVME_FEAT_KATO:
+ /*
+ * The keep-alive command interval on the host should be
+ * updated when KATO is modified by a Set Features
+ * command.
+ */
+ if (!status)
+ nvme_update_keep_alive(ctrl, cmd);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
}
int nvme_execute_passthru_rq(struct request *rq)
@@ -1181,7 +1204,7 @@ int nvme_execute_passthru_rq(struct request *rq)
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
ret = nvme_execute_rq(disk, rq, false);
if (effects) /* nothing to be done for zero cmd effects */
- nvme_passthru_end(ctrl, effects);
+ nvme_passthru_end(ctrl, effects, cmd, ret);
return ret;
}
@@ -1269,6 +1292,21 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd)
+{
+ unsigned int new_kato =
+ DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
+
+ dev_info(ctrl->device,
+ "keep alive interval updated from %u ms to %u ms\n",
+ ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
+
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = new_kato;
+ nvme_start_keep_alive(ctrl);
+}
+
/*
* In NVMe 1.0 the CNS field was just a binary controller or namespace
* flag, thus sending any new CNS opcodes has a big chance of not working.
@@ -1302,11 +1340,6 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
-static bool nvme_multi_css(struct nvme_ctrl *ctrl)
-{
- return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
-}
-
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
struct nvme_ns_id_desc *cur, bool *csi_seen)
{
@@ -1874,6 +1907,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
goto out_unfreeze;
}
+ set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
@@ -1885,6 +1919,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
+ nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
@@ -3763,7 +3798,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ goto out_cleanup_ns_from_list;
+
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
@@ -3773,6 +3810,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
return;
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+ down_write(&ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ctrl->namespaces_rwsem);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3795,6 +3837,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ clear_bit(NVME_NS_READY, &ns->flags);
set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
@@ -3802,9 +3845,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
- synchronize_rcu(); /* guarantee not available in head->list */
- nvme_mpath_clear_current_path(ns);
- synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+ /* guarantee not available in head->list */
+ synchronize_rcu();
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
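In the core.c changes above, nvme_update_keep_alive() converts the Set Features KATO argument, which cdw11 carries in milliseconds, to the seconds stored in ctrl->kato, rounding up; the dev_info() halves the value because, as the message shows, keep-alives are sent at half the timeout. The conversion, isolated:
	#include <linux/kernel.h>
	#include <linux/types.h>

	static unsigned int kato_ms_to_secs(u32 cdw11)
	{
		return DIV_ROUND_UP(cdw11, 1000);	/* e.g. 2500 ms -> 3 s */
	}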
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 37ce3e8b1db2..5d7bc58a27bd 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -147,6 +147,21 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
mutex_unlock(&ctrl->scan_lock);
}
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ sector_t capacity = get_capacity(head->disk);
+ int node;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (capacity != get_capacity(ns->disk))
+ clear_bit(NVME_NS_READY, &ns->flags);
+ }
+
+ for_each_node(node)
+ rcu_assign_pointer(head->current_path[node], NULL);
+}
+
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
/*
@@ -158,7 +173,7 @@ static bool nvme_path_is_disabled(struct nvme_ns *ns)
ns->ctrl->state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags))
+ !test_bit(NVME_NS_READY, &ns->flags))
return true;
return false;
}
@@ -465,6 +480,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
ctrl->subsys->instance, head->instance);
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(head->disk->queue, 512);
blk_set_stacking_limits(&head->disk->queue->limits);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a2e1f298b217..9871c0c9374c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -456,6 +456,7 @@ struct nvme_ns {
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
struct cdev cdev;
struct device cdev_device;
@@ -748,6 +749,7 @@ void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
@@ -795,6 +797,9 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
}
+static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
@@ -887,4 +892,9 @@ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
+static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 645025620154..e2ab12f3f51c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -45,6 +45,7 @@ struct nvme_tcp_request {
u32 pdu_len;
u32 pdu_sent;
u16 ttag;
+ __le16 status;
struct list_head entry;
struct llist_node lentry;
__le32 ddgst;
@@ -485,6 +486,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
+ struct nvme_tcp_request *req;
struct request *rq;
rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -496,7 +498,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+ req->status = cqe->status;
+
+ if (!nvme_try_complete_req(rq, req->status, cqe->result))
nvme_complete_rq(rq);
queue->nr_cqe++;
@@ -758,7 +764,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq,
+ le16_to_cpu(req->status));
queue->nr_cqe++;
}
nvme_tcp_init_recv_ctx(queue);
@@ -788,18 +795,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return 0;
if (queue->recv_ddgst != queue->exp_ddgst) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
dev_err(queue->ctrl->ctrl.device,
"data digest error: recv %#x expected %#x\n",
le32_to_cpu(queue->recv_ddgst),
le32_to_cpu(queue->exp_ddgst));
- return -EIO;
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq, le16_to_cpu(req->status));
queue->nr_cqe++;
}
@@ -2293,6 +2306,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
return ret;
req->state = NVME_TCP_SEND_CMD_PDU;
+ req->status = cpu_to_le16(NVME_SC_SUCCESS);
req->offset = 0;
req->data_sent = 0;
req->pdu_len = 0;
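The tcp.c hunks above stop failing the whole connection with -EIO on a data digest mismatch; instead each request carries a status, initialized to NVME_SC_SUCCESS, that latches the first error seen (NVME_SC_DATA_XFER_ERROR here) and is reported when the request completes. The latching rule, in isolation:
	#include <stdint.h>

	struct req_status {
		uint16_t status;	/* 0 == success */
	};

	static void latch_status(struct req_status *r, uint16_t sts)
	{
		if (r->status == 0)	/* only the first error sticks */
			r->status = sts;
	}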
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 0cb98f2bbc8c..aa6d84d8848e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1015,7 +1015,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 273555127188..d784f3c200b4 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1028,7 +1028,7 @@ nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
}
/* passthru subsystems use the underlying controller's version */
- if (nvmet_passthru_ctrl(subsys))
+ if (nvmet_is_passthru_subsys(subsys))
return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
@@ -1067,7 +1067,8 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
}
static ssize_t
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 66d05eecc2a9..b8425fa34300 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -553,7 +553,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
- if (nvmet_passthru_ctrl(subsys)) {
+ if (nvmet_is_passthru_subsys(subsys)) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
goto out_unlock;
}
@@ -869,7 +869,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_io_cmd(req);
ret = nvmet_req_find_ns(req);
@@ -1206,6 +1206,9 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
}
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
@@ -1363,8 +1366,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
-
ctrl->port = req->port;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
@@ -1378,6 +1379,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 06dd3d537f07..7143c7fa7464 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -582,7 +582,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return subsys->passthru_ctrl;
}
@@ -601,18 +601,19 @@ static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
return 0;
}
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
-static inline struct nvme_ctrl *
-nvmet_req_passthru_ctrl(struct nvmet_req *req)
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(nvmet_req_subsys(req));
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 225cd1ffbe45..f0efb3537989 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -20,6 +20,16 @@ MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
*/
static DEFINE_XARRAY(passthru_subsystems);
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -218,7 +228,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
@@ -299,7 +309,7 @@ out:
*/
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct nvme_feat_host_behavior *host;
u16 status = NVME_SC_INTERNAL;
int ret;
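nvmet_passthrough_override_cap() above clears bit 43 of CAP when the underlying controller lacks multiple command set support; bit 43 is bit 6 of the CAP.CSS field (bits 44:37), the "one or more I/O command sets" capability. A self-documenting spelling of the same mask (macro names here are illustrative, not from the patch):
	#define CAP_CSS_SHIFT	37		/* CAP.CSS lives in bits 44:37 */
	#define CAP_CSS_CSI	(1ULL << 6)	/* one or more I/O command sets */

	static inline unsigned long long cap_clear_csi(unsigned long long cap)
	{
		return cap & ~(CAP_CSS_CSI << CAP_CSS_SHIFT);	/* == ~(1ULL << 43) */
	}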
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c5a9473a5fb1..5b043ee30824 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -5,6 +5,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
+#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
@@ -52,6 +53,42 @@ int of_device_add(struct platform_device *ofdev)
return device_add(&ofdev->dev);
}
+static void
+of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
+{
+ struct device_node *node, *of_node = dev->of_node;
+ int count, i;
+
+ if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
+ return;
+
+ count = of_property_count_elems_of_size(of_node, "memory-region",
+ sizeof(u32));
+ /*
+ * If dev->of_node doesn't exist or doesn't contain memory-region, try
+ * the OF node having DMA configuration.
+ */
+ if (count <= 0) {
+ of_node = np;
+ count = of_property_count_elems_of_size(
+ of_node, "memory-region", sizeof(u32));
+ }
+
+ for (i = 0; i < count; i++) {
+ node = of_parse_phandle(of_node, "memory-region", i);
+ /*
+ * There might be multiple memory regions, but only one
+ * restricted-dma-pool region is allowed.
+ */
+ if (of_device_is_compatible(node, "restricted-dma-pool") &&
+ of_device_is_available(node))
+ break;
+ }
+
+ if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+ dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
+}
+
/**
* of_dma_configure_id - Setup DMA configuration
* @dev: Device to apply DMA configuration
@@ -165,6 +202,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+ if (!iommu)
+ of_dma_set_restricted_buffer(dev, np);
+
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index fd3964d24224..59c1390cdf42 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -33,18 +33,22 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t *res_base)
{
phys_addr_t base;
+ int err = 0;
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
align = !align ? SMP_CACHE_BYTES : align;
- base = memblock_find_in_range(start, end, size, align);
+ base = memblock_phys_alloc_range(size, align, start, end);
if (!base)
return -ENOMEM;
*res_base = base;
- if (nomap)
- return memblock_mark_nomap(base, size);
+ if (nomap) {
+ err = memblock_mark_nomap(base, size);
+ if (err)
+ memblock_free(base, size);
+ }
- return memblock_reserve(base, size);
+ return err;
}
/*
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 0c0dc2e369c0..3fd74bb34819 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1444,6 +1444,9 @@ static int of_fwnode_add_links(struct fwnode_handle *fwnode)
struct property *p;
struct device_node *con_np = to_of_node(fwnode);
+ if (IS_ENABLED(CONFIG_X86))
+ return 0;
+
if (!con_np)
return -EINVAL;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index b5f9ee81a46c..059566f54429 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -111,29 +111,29 @@
#define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
struct ioa_registers {
- /* Runway Supervisory Set */
- int32_t unused1[12];
- uint32_t io_command; /* Offset 12 */
- uint32_t io_status; /* Offset 13 */
- uint32_t io_control; /* Offset 14 */
- int32_t unused2[1];
-
- /* Runway Auxiliary Register Set */
- uint32_t io_err_resp; /* Offset 0 */
- uint32_t io_err_info; /* Offset 1 */
- uint32_t io_err_req; /* Offset 2 */
- uint32_t io_err_resp_hi; /* Offset 3 */
- uint32_t io_tlb_entry_m; /* Offset 4 */
- uint32_t io_tlb_entry_l; /* Offset 5 */
- uint32_t unused3[1];
- uint32_t io_pdir_base; /* Offset 7 */
- uint32_t io_io_low_hv; /* Offset 8 */
- uint32_t io_io_high_hv; /* Offset 9 */
- uint32_t unused4[1];
- uint32_t io_chain_id_mask; /* Offset 11 */
- uint32_t unused5[2];
- uint32_t io_io_low; /* Offset 14 */
- uint32_t io_io_high; /* Offset 15 */
+ /* Runway Supervisory Set */
+ int32_t unused1[12];
+ uint32_t io_command; /* Offset 12 */
+ uint32_t io_status; /* Offset 13 */
+ uint32_t io_control; /* Offset 14 */
+ int32_t unused2[1];
+
+ /* Runway Auxiliary Register Set */
+ uint32_t io_err_resp; /* Offset 0 */
+ uint32_t io_err_info; /* Offset 1 */
+ uint32_t io_err_req; /* Offset 2 */
+ uint32_t io_err_resp_hi; /* Offset 3 */
+ uint32_t io_tlb_entry_m; /* Offset 4 */
+ uint32_t io_tlb_entry_l; /* Offset 5 */
+ uint32_t unused3[1];
+ uint32_t io_pdir_base; /* Offset 7 */
+ uint32_t io_io_low_hv; /* Offset 8 */
+ uint32_t io_io_high_hv; /* Offset 9 */
+ uint32_t unused4[1];
+ uint32_t io_chain_id_mask; /* Offset 11 */
+ uint32_t unused5[2];
+ uint32_t io_io_low; /* Offset 14 */
+ uint32_t io_io_high; /* Offset 15 */
};
/*
@@ -198,7 +198,7 @@ struct ioa_registers {
** In order for a Runway address to reside within GSC+ extended address space:
** Runway Address [0:7] must identically compare to 8'b11111111
** Runway Address [8:11] must be equal to IO_IO_LOW(_HV)[16:19]
-** Runway Address [12:23] must be greater than or equal to
+** Runway Address [12:23] must be greater than or equal to
** IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
** Runway Address [24:39] is not used in the comparison.
**
@@ -226,10 +226,10 @@ struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
- u32 pdir_size; /* bytes, function of IOV Space size */
- u32 res_hint; /* next available IOVP -
+ u32 pdir_size; /* bytes, function of IOV Space size */
+ u32 res_hint; /* next available IOVP -
circular search */
- u32 res_size; /* size of resource map in bytes */
+ u32 res_size; /* size of resource map in bytes */
spinlock_t res_lock;
#ifdef CCIO_COLLECT_STATS
@@ -249,7 +249,7 @@ struct ioc {
unsigned short cujo20_bug;
/* STUFF We don't need in performance path */
- u32 chainid_shift; /* specify bit location of chain_id */
+ u32 chainid_shift; /* specify bit location of chain_id */
struct ioc *next; /* Linked list of discovered iocs */
const char *name; /* device name from firmware */
unsigned int hw_path; /* the hardware path this ioc is associated with */
@@ -293,7 +293,7 @@ static int ioc_count;
** cause the kernel to panic anyhow.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
- for(; res_ptr < res_end; ++res_ptr) { \
+ for (; res_ptr < res_end; ++res_ptr) { \
int ret;\
unsigned int idx;\
idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
@@ -309,9 +309,9 @@ static int ioc_count;
#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
- CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
- res_ptr = (u##size *)&(ioc)->res_map[0]; \
- CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
+ CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
+ res_ptr = (u##size *)&(ioc)->res_map[0]; \
+ CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
/*
** Find available bit in this ioa's resource map.
@@ -348,9 +348,9 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
-
- DBG_RES("%s() size: %d pages_needed %d\n",
- __func__, size, pages_needed);
+
+ DBG_RES("%s() size: %d pages_needed %d\n",
+ __func__, size, pages_needed);
/*
** "seek and ye shall find"...praying never hurts either...
@@ -416,7 +416,7 @@ resource_found:
#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
BUG_ON((*res_ptr & mask) != mask); \
- *res_ptr &= ~(mask);
+ *res_ptr &= ~(mask);
/**
* ccio_free_range - Free pages from the ioc's resource map.
@@ -518,9 +518,9 @@ typedef unsigned long space_t;
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
- [PCI_DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
- [PCI_DMA_TODEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
- [PCI_DMA_FROMDEVICE] = HINT_STOP_MOST | IOPDIR_VALID,
+ [DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
+ [DMA_TO_DEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
+ [DMA_FROM_DEVICE] = HINT_STOP_MOST | IOPDIR_VALID,
};
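The table is indexed directly by enum dma_data_direction; the DMA_* constants have the same values as the removed PCI_DMA_* aliases, so this hunk is a pure rename. A mapping path then picks up its IOPDIR hint bits with a plain lookup; schematically (names illustrative, not this driver's exact pdir-entry code):

	u64 pdir_entry = 0;
	unsigned long hints = hint_lookup[(int)direction];

	pdir_entry |= hints;	/* OR'd into the I/O pdir entry */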
/**
@@ -845,7 +845,7 @@ static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- void *ret;
+ void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
@@ -856,11 +856,11 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
return 0;
}
#endif
- ret = (void *) __get_free_pages(flag, get_order(size));
+ ret = (void *) __get_free_pages(flag, get_order(size));
if (ret) {
memset(ret, 0, size);
- *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
+ *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
}
return ret;
@@ -918,7 +918,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
- return 0;
+ return -EINVAL;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
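Returning -EINVAL instead of 0 here follows the tree-wide conversion of .map_sg to return a negative errno on failure. Callers going through the sg_table helpers then see that code directly; a hedged sketch of the consuming side:

	/* dma_map_sgtable() propagates the errno from .map_sg */
	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* e.g. -EINVAL when no IOC is attached */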
@@ -1022,8 +1022,8 @@ static const struct dma_map_ops ccio_ops = {
.free = ccio_free,
.map_page = ccio_map_page,
.unmap_page = ccio_unmap_page,
- .map_sg = ccio_map_sg,
- .unmap_sg = ccio_unmap_sg,
+ .map_sg = ccio_map_sg,
+ .unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
@@ -1080,7 +1080,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
max = ioc->usingle_pages - ioc->usg_pages;
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
min, max, (int)((max * 1000)/min));
-
+
seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
ioc->msg_calls, ioc->msg_pages,
(int)((ioc->msg_pages * 1000)/ioc->msg_calls));
@@ -1169,7 +1169,7 @@ void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
idx = PDIR_INDEX(iovp) >> 3;
while (idx < ioc->res_size) {
- res_ptr[idx] |= 0xff;
+ res_ptr[idx] |= 0xff;
idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
}
}
@@ -1297,7 +1297,7 @@ ccio_ioc_init(struct ioc *ioc)
DBG_INIT(" base %p\n", ioc->pdir_base);
/* resource map size dictated by pdir_size */
- ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
+ ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 889d7ce282eb..952a92504df6 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -156,15 +156,6 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
return container_of(hba, struct dino_device, hba);
}
-/* Check if PCI device is behind a Card-mode Dino. */
-static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
-{
- struct dino_device *dino_dev;
-
- dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
- return is_card_dino(&dino_dev->hba.dev->id);
-}
-
/*
* Dino Configuration Space Accessor Functions
*/
@@ -447,6 +438,15 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
#ifdef CONFIG_TULIP
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+ struct dino_device *dino_dev;
+
+ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+ return is_card_dino(&dino_dev->hba.dev->id);
+}
+
static void pci_fixup_tulip(struct pci_dev *dev)
{
if (!pci_dev_is_behind_card_dino(dev))
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 36c6613f7a36..cf91cb024be3 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -250,14 +250,14 @@ static int __init led_create_procfs(void)
if (!lcd_no_led_support)
{
- ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
+ ent = proc_create_data("led", 0644, proc_pdc_root,
&led_proc_ops, (void *)LED_NOLCD); /* LED */
if (!ent) return -1;
}
if (led_type == LED_HASLCD)
{
- ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
+ ent = proc_create_data("lcd", 0644, proc_pdc_root,
&led_proc_ops, (void *)LED_HASLCD); /* LCD */
if (!ent) return -1;
}
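The symbolic and octal forms are identical here: S_IRUGO is 0444 and S_IWUSR is 0200, so S_IRUGO|S_IWUSR == 0444|0200 == 0644; the octal literal is simply the spelling checkpatch now prefers.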
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index dce4cdf786cd..e60690d38d67 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -947,7 +947,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc = GET_IOC(dev);
if (!ioc)
- return 0;
+ return -EINVAL;
/* Fast path single entry scatterlists. */
if (nents == 1) {
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 1e43b3f399a8..0dcc497b0449 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -378,7 +378,7 @@ static int __init parport_init_chip(struct parisc_device *dev)
return 0;
}
-static int __exit parport_remove_chip(struct parisc_device *dev)
+static void __exit parport_remove_chip(struct parisc_device *dev)
{
struct parport *p = dev_get_drvdata(&dev->dev);
if (p) {
@@ -390,14 +390,12 @@ static int __exit parport_remove_chip(struct parisc_device *dev)
if (p->irq != PARPORT_IRQ_NONE)
free_irq(p->irq, p);
if (priv->dma_buf)
- pci_free_consistent(priv->dev, PAGE_SIZE,
- priv->dma_buf,
- priv->dma_handle);
+ dma_free_coherent(&priv->dev->dev, PAGE_SIZE,
+ priv->dma_buf, priv->dma_handle);
kfree (p->private_data);
parport_put_port(p);
kfree (ops); /* hope no-one cached it */
}
- return 0;
}
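pci_free_consistent() was a thin legacy wrapper around the generic DMA API, roughly this (modulo a NULL-hwdev check in the real compat header):

/* Approximate expansion of the removed legacy call: */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
					void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
}

so the replacement is behavior-preserving.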
static const struct parisc_device_id parport_tbl[] __initconst = {
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6d7d64939f82..c967ad6e2626 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -376,7 +376,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path)
+ if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5e1e3796efa4..326f7d13024f 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -40,6 +40,7 @@ config PCI_FTPCI100
config PCI_IXP4XX
bool "Intel IXP4xx PCI controller"
depends on ARM && OF
+ depends on ARCH_IXP4XX || COMPILE_TEST
default ARCH_IXP4XX
help
Say Y here if you want support for the PCI host controller found
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 35e61048e133..ffb176d288cd 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -27,6 +27,7 @@
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
+#define J7200_LINK_DOWN BIT(10)
#define J721E_PCIE_USER_CMD_STATUS 0x4
#define LINK_TRAINING_ENABLE BIT(0)
@@ -57,6 +58,7 @@ struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
+ u32 linkdown_irq_regfield;
};
enum j721e_pcie_mode {
@@ -66,7 +68,10 @@ enum j721e_pcie_mode {
struct j721e_pcie_data {
enum j721e_pcie_mode mode;
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+ u32 linkdown_irq_regfield;
+ unsigned int byte_access_allowed:1;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -98,12 +103,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
- if (!(reg & LINK_DOWN))
+ if (!(reg & pcie->linkdown_irq_regfield))
return IRQ_NONE;
dev_err(dev, "LINK DOWN!\n");
- j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
return IRQ_HANDLED;
}
@@ -112,7 +117,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
- reg |= LINK_DOWN;
+ reg |= pcie->linkdown_irq_regfield;
j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}
@@ -284,10 +289,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+};
+
+static const struct j721e_pcie_data j7200_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data j7200_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .quirk_detect_quiet_flag = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -299,6 +330,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j721e-pcie-ep",
.data = &j721e_pcie_ep_data,
},
+ {
+ .compatible = "ti,j7200-pcie-host",
+ .data = &j7200_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j7200-pcie-ep",
+ .data = &j7200_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-host",
+ .data = &am64_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-ep",
+ .data = &am64_pcie_ep_data,
+ },
{},
};
@@ -332,6 +379,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
pcie->mode = mode;
+ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
if (IS_ERR(base))
@@ -391,9 +439,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- bridge->ops = &cdns_ti_pcie_host_ops;
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
rc = pci_host_bridge_priv(bridge);
rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &rc->pcie;
cdns_pcie->dev = dev;
@@ -459,6 +509,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_get_sync;
}
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 897cdde02bd8..88e05b9c2e5b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -16,11 +16,37 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
+
+ if (vfn == 0)
+ return fn;
+
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+
+ return fn;
+}
+
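This is the standard SR-IOV routing arithmetic: a VF's function number is the parent PF's number plus First VF Offset plus (vfn - 1) times VF Stride, both fields read from the PF's SR-IOV capability. A worked example with assumed register values:

/*
 * Assume fn = 0, PCI_SRIOV_VF_OFFSET reads 4, PCI_SRIOV_VF_STRIDE reads 1:
 *   vfn = 1  ->  0 + 4 + (1 - 1) * 1 = function 4
 *   vfn = 2  ->  0 + 4 + (2 - 1) * 1 = function 5
 */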
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return -EINVAL;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -47,7 +73,7 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -92,32 +118,36 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
- cdns_pcie_writel(pcie, reg, cfg);
-
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = epf_bar;
return 0;
}
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -126,29 +156,32 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
enum pci_barno bar = epf_bar->barno;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
}
- ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
- cdns_pcie_writel(pcie, reg, cfg);
-
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -161,6 +194,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return -EINVAL;
}
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -169,7 +203,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -189,13 +223,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
@@ -209,13 +245,15 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -230,13 +268,15 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return mme;
}
-static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
@@ -247,14 +287,17 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
@@ -274,8 +317,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
return 0;
}
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
unsigned long flags;
@@ -317,7 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -334,7 +378,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
@@ -343,6 +387,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -382,7 +428,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset)
@@ -396,6 +442,8 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
int ret;
int i;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -419,7 +467,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
pci_addr &= GENMASK_ULL(63, 2);
for (i = 0; i < interrupt_num; i++) {
- ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
pci_addr & ~pci_addr_mask,
entry_size);
if (ret)
@@ -433,7 +481,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
@@ -446,6 +494,12 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
u16 flags;
u8 bir;
+ epf = &ep->epf[fn];
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI-X feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
if (!(flags & PCI_MSIX_FLAGS_ENABLE))
@@ -456,7 +510,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
bir = tbl_offset & PCI_MSIX_TABLE_BIR;
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf = &ep->epf[fn];
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
@@ -478,21 +531,27 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -523,6 +582,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
@@ -531,9 +597,12 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
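An endpoint-function driver reaches this through the EPC core, which after this series also takes the virtual function number. A minimal sketch, assuming epf->epc, epf->func_no and epf->vfunc_no are populated by the EPF core as usual:

	const struct pci_epc_features *features;

	features = pci_epc_get_features(epf->epc, epf->func_no,
					epf->vfunc_no);
	if (features && !features->msix_capable)
		return -EOPNOTSUPP;	/* or fall back to MSI */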
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -559,9 +628,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
@@ -606,6 +677,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (!ep->epf)
return -ENOMEM;
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
@@ -623,6 +713,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
/* Reserve region 0 for IRQs */
set_bit(0, &ep->ob_region_map);
+
+ if (ep->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+
spin_lock_init(&ep->lock);
return 0;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index ae1c55503513..fb96d37a135c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -498,6 +498,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 3c3646502d05..52767f26048f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -7,6 +7,22 @@
#include "pcie-cadence.h"
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /*
+ * Set the LTSSM Detect Quiet state min. delay to 2ms.
+ */
+ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
+}
+
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size)
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 30db2d68c17a..262421e5d917 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
/* Parameters for the waiting for link up routine */
@@ -46,10 +47,18 @@
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
@@ -114,6 +123,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
* Root Port Registers (PCI configuration space for the root port function)
@@ -189,6 +199,14 @@
/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+
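The open-coded shift-and-mask macro above is equivalent to the bitfield helper form, shown here only for comparison:

	#include <linux/bitfield.h>

	/* Same result as CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay): */
	u32 field = FIELD_PREP(CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK, delay);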
enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
RP_BAR0,
@@ -295,6 +313,7 @@ struct cdns_pcie {
 * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -303,14 +322,17 @@ struct cdns_pcie_rc {
u32 vendor_id;
u32 device_id;
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
};
/**
* struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
* @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
*/
struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -334,6 +356,7 @@ struct cdns_pcie_epf {
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -348,6 +371,7 @@ struct cdns_pcie_ep {
/* protect writing to PCI_STATUS while raising legacy interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
+ unsigned int quirk_detect_quiet_flag:1;
};
@@ -508,6 +532,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return 0;
}
#endif
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 423d35872ce4..76c0a63a3f64 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -214,6 +214,17 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the DesignWare PCIe controller in the
+	  Rockchip SoCs other than the RK3399.
+
config PCIE_INTEL_GW
bool "Intel Gateway PCIe host controller support"
depends on OF && (X86 || COMPILE_TEST)
@@ -225,6 +236,34 @@ config PCIE_INTEL_GW
The PCIe controller uses the DesignWare core plus Intel-specific
hardware wrappers.
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller - Host mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller - Endpoint mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
@@ -286,6 +325,15 @@ config PCIE_TEGRA194_EP
in order to enable device-specific features PCIE_TEGRA194_EP must be
selected. This uses the DesignWare core.
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
+	  This driver supports the TMPV7708 SoC.
+
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 9e6ce0dc2f53..73244409792c 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -14,13 +14,16 @@ obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 047cfbdc1330..fbbb78f6885e 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -204,7 +204,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
- int pos, irq;
+ int pos;
val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
(index * MSI_REG_CTRL_BLOCK_SIZE));
@@ -213,9 +213,8 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (index * MAX_MSI_IRQS_PER_CTRL) + pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
pos++;
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
@@ -257,7 +256,7 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
struct dw_pcie *pci;
struct pcie_port *pp;
unsigned long reg;
- u32 virq, bit;
+ u32 bit;
chained_irq_enter(chip, desc);
@@ -276,11 +275,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
case INTB:
case INTC:
case INTD:
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_find_mapping(dra7xx->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
break;
}
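generic_handle_domain_irq() folds the irq_find_mapping() + generic_handle_irq() pair used across these handlers into a single call, avoiding the separate reverse-map lookup. The shape of the conversion:

	/* before: */
	virq = irq_find_mapping(domain, hwirq);
	if (virq)
		generic_handle_irq(virq);

	/* after: */
	generic_handle_domain_irq(domain, hwirq);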
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index bde3b2824e89..865258d8c53c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -259,14 +259,12 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
u32 pending;
- int virq;
pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
+		dev_dbg(dev, ": irq: irq_offset %d\n", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -579,7 +577,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 vector, virq, reg, pos;
+ u32 vector, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -600,10 +598,8 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
continue;
vector = offset + (pos << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
- virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 597c282f586c..c91fc1954432 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -384,6 +384,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
+ u32 val;
match = of_match_device(artpec6_pcie_of_match, dev);
if (!match)
@@ -432,9 +433,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
break;
- case DW_PCIE_EP_TYPE: {
- u32 val;
-
+ case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
@@ -445,8 +444,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
- }
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8d028a88b375..998b698f4085 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -125,7 +125,7 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -202,7 +202,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -217,7 +217,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -276,7 +276,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -292,9 +292,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -309,7 +308,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -333,7 +332,8 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -358,7 +358,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -382,8 +382,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -418,7 +418,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -450,7 +450,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -525,14 +525,14 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -593,14 +593,14 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index a608ae1fad57..d1d9b8344ec9 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -55,7 +55,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- int i, pos, irq;
+ int i, pos;
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
@@ -74,10 +74,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pos = 0;
while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (i * MAX_MSI_IRQS_PER_CTRL) +
- pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
pos++;
}
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 9b397c807261..8851eb161a0e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -164,7 +164,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000000..c9b341e55cbb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
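A quick worked value makes the write-mask convention concrete: HIWORD_UPDATE_BIT(0x40) expands to (0x40 << 16) | 0x40 = 0x00400040, i.e. "allow updating bit 6, and set it" in a single relaxed write.

/* e.g. PCIE_CLIENT_RC_MODE == HIWORD_UPDATE_BIT(0x40) == 0x00400040 */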
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+ /*
+ * PCIe requires the refclk to be stable for 100µs prior to releasing
+ * PERST. See table 2-4 in section 2.6.2 AC Specifications of the PCI
+ * Express Card Electromechanical Specification, 1.1. However, we don't
+	 * know whether the refclk comes from the RC's PHY or from an
+	 * external OSC. If it comes from the RC, enabling LTSSM is the right
+	 * point at which to release PERST#. Wait well beyond the bare 100us,
+	 * since we don't know how long the device actually needs to come out
+	 * of reset.
+ */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+	/* Tear down in reverse order of rockchip_pcie_phy_init() */
+	phy_power_off(rockchip->phy);
+	phy_exit(rockchip->phy);
+}
+
+static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return reset_control_deassert(rockchip->rst);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct pcie_port *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+	/* DON'T MOVE ME: must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = rockchip_pcie_reset_control_release(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000000..1ac29a6eef22
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table-2.4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
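Casting clk_disable_unprepare() to void (*)(void *) works here because the clk pointer is passed through unchanged, but a cast-free spelling is a one-line wrapper; a hypothetical variant:

	static void keembay_pcie_clk_disable(void *data)
	{
		clk_disable_unprepare(data);
	}

	/* ... devm_add_action_or_reset(dev, keembay_pcie_clk_disable, clk); */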
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
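+ /*
+ * FIELD_PREP() shifts each value into the bit positions named by its
+ * mask: FIELD_PREP(LJPLL_FB_DIV, 0x32) above places the feedback
+ * divider 0x32 (50) into bits 11:0, and FIELD_PREP(LJPLL_FOUT_EN, 0xc)
+ * sets the upper two of the four output-enable bits (bits 24:21).
+ */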
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct pcie_port *pp;
+
+ /*
+ * The Keem Bay PCIe controller provides additional IP logic on top of
+ * the standard DWC IP to clear an MSI IRQ by writing '1' to the
+ * respective bit of the status register.
+ *
+ * A chained IRQ handler is therefore registered to drive this
+ * additional IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
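+
+/*
+ * In the features above, BARs 0, 2 and 4 are fixed 64-bit BARs; a
+ * 64-bit BAR consumes the following BAR register as well, which is why
+ * BARs 1, 3 and 5 are marked reserved.
+ */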
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
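+
+/*
+ * A minimal, illustrative devicetree fragment for the RC flavour of
+ * this driver. Only the compatible string, the "apb" reg name, the
+ * "master"/"aux" clock names, the "pcie" interrupt name and the
+ * reset-gpios property are taken from the code above; addresses,
+ * phandles and numbers are placeholder assumptions, and the reg
+ * entries the generic DWC core expects (e.g. "dbi", "config") are
+ * omitted:
+ *
+ *	pcie: pcie@37000000 {
+ *		compatible = "intel,keembay-pcie";
+ *		reg = <0x37000000 0x800>;
+ *		reg-names = "apb";
+ *		clocks = <&scmi_clk 0>, <&scmi_clk 1>;
+ *		clock-names = "master", "aux";
+ *		interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ *		interrupt-names = "pcie";
+ *		reset-gpios = <&pca0 9 GPIO_ACTIVE_LOW>;
+ *	};
+ */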
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 3ec7b29d5dc7..904976913081 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -497,19 +497,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
- u32 val, tmp;
+ u32 status_l0, status_l1, link_status;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
pex_ep_event_hot_rst_done(pcie);
- if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
- tmp = appl_readl(pcie, APPL_LINK_STATUS);
- if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
dw_pcie_ep_linkup(ep);
}
@@ -518,11 +518,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
spurious = 0;
}
- if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
- if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
return IRQ_WAKE_THREAD;
spurious = 0;
@@ -530,8 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
if (spurious) {
dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
- val);
- appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
}
return IRQ_HANDLED;
@@ -1493,6 +1493,16 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
return;
}
+ /*
+ * The PCIe controller exits from L2 only if reset is applied, so
+ * the controller does not handle interrupts afterwards. But in
+ * cases where L2 entry fails, PERST# is asserted, which can
+ * trigger a surprise link down AER. Since this function is called
+ * from suspend_noirq(), that AER interrupt would never be
+ * processed. Disable all interrupts to avoid such a scenario.
+ */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
if (tegra_pcie_try_link_l2(pcie)) {
dev_info(pcie->dev, "Link didn't transition to L2 state\n");
/*
@@ -1763,7 +1773,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
- val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
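+ /*
+ * MSIX_ADDR_MATCH_HIGH_OFF must hold the upper 32 bits of
+ * msi_mem_phys; the lower_32_bits() used previously merely
+ * duplicated the LOW register's value.
+ */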
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
ret = dw_pcie_ep_init_complete(ep);
@@ -1935,13 +1945,6 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
return ret;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
- pcie->cid);
- if (!name) {
- dev_err(dev, "Failed to create PCIe EP work thread string\n");
- return -ENOMEM;
- }
-
pm_runtime_enable(dev);
ret = dw_pcie_ep_init(ep);
@@ -2236,6 +2239,11 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Suspend is not supported in EP mode");
+ return -ENOTSUPP;
+ }
+
if (!pcie->link_state)
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 7e8bad326770..d842fd018129 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -235,7 +235,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
- u32 val, bit, virq;
+ u32 val, bit;
/* INT for debug */
val = readl(priv->base + PCL_RCV_INT);
@@ -257,10 +257,8 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
val = readl(priv->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(priv->legacy_irq_domain, bit);
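+ /*
+ * generic_handle_domain_irq() performs the hwirq-to-virq lookup
+ * inside the domain and dispatches the mapped handler, returning
+ * nonzero if no mapping exists, so the intermediate virq local is
+ * no longer needed.
+ */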
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000000..a88eab6829bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * On this SoC, the CPU bus presents addresses to the PCIe bus as
+ * offsets from 0x40000000, so 0x40000000 is subtracted from the CPU
+ * bus address. This 0x40000000 base also corresponds to io_base taken
+ * from the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct pcie_port *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
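+
+/*
+ * Illustrative arithmetic for the mask above (assuming pp->io_base is
+ * 0x40000000): a CPU address of 0x48000000 has only bit 30 cleared,
+ * giving PCIe address 0x08000000. The AND-with-complement acts as a
+ * subtraction only because io_base is a single power-of-two bit.
+ */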
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
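+
+/*
+ * The bring-up sequence above, in order: ungate the PCIe clocks,
+ * release the ulreg reset, select RC mode, drive PERST# via the
+ * PERSTN_CTRL register, release the power-up reset, wait for the PHY
+ * SRAM to initialize, signal that external SRAM loading is done, and
+ * finally wait for the core reset to be released.
+ */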
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0) {
+ dev_err(dev, "Interrupt intr is missing");
+ return pp->irq;
+ }
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);
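+
+/*
+ * A minimal, illustrative devicetree fragment for this driver. Only
+ * the compatible string, the "ulreg"/"smu"/"mpu" reg names, the
+ * "ref"/"core"/"aux" clock names and the "intr" interrupt name are
+ * taken from the code above; addresses, clock phandles and interrupt
+ * numbers are placeholder assumptions, and the reg entries the
+ * generic DWC core expects (e.g. "dbi", "config") are omitted:
+ *
+ *	pcie: pcie@28400000 {
+ *		compatible = "toshiba,visconti-pcie";
+ *		reg = <0 0x28400000 0 0x1000>,
+ *		      <0 0x24200000 0 0x1000>,
+ *		      <0 0x24162000 0 0x1000>;
+ *		reg-names = "ulreg", "smu", "mpu";
+ *		clocks = <&pismu 1>, <&pismu 2>, <&pismu 3>;
+ *		clock-names = "ref", "core", "aux";
+ *		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+ *		interrupt-names = "intr";
+ *	};
+ */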
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index c637de3a389b..f3547aa60140 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -92,7 +92,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
- u32 bit, virq, val, mask;
+ u32 bit, val, mask;
/*
* The core provides a single interrupt for both INTx/MSI messages.
@@ -114,11 +114,10 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(rp->intx_domain,
- bit + 1);
- if (virq)
- generic_handle_irq(virq);
- else
+ int ret;
+ ret = generic_handle_domain_irq(rp->intx_domain,
+ bit + 1);
+ if (ret)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
bit);
@@ -155,9 +154,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
msi_data, msi_addr_hi, msi_addr_lo);
- virq = irq_find_mapping(msi->dev_domain, msi_data);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, msi_data);
msi_status = readl_relaxed(pcie->apb_csr_base +
MSI_STATUS_OFFSET);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c95ebe808f92..596ebcfcc82d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -58,6 +58,7 @@
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -118,6 +119,46 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_COUNT 8
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_ENABLE BIT(0)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
+#define OB_WIN_FUNC_NUM_SHIFT 24
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
+#define OB_WIN_MSG_CODE_SHIFT 14
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
+#define OB_WIN_ATTR_ENABLE BIT(11)
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
+#define OB_WIN_ATTR_TC_SHIFT 8
+#define OB_WIN_ATTR_RELAXED BIT(7)
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
+#define OB_WIN_ATTR_POISON BIT(5)
+#define OB_WIN_ATTR_IDO BIT(4)
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
+#define OB_WIN_TYPE_SHIFT 0
+#define OB_WIN_TYPE_MEM 0x0
+#define OB_WIN_TYPE_IO 0x4
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
+#define OB_WIN_TYPE_MSG 0xc
+
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -166,7 +207,7 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-#define PIO_RETRY_CNT 500
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us*/
#define LINK_WAIT_MAX_RETRIES 10
@@ -177,11 +218,21 @@
#define MSI_IRQ_NUM 32
+#define CFG_RD_CRS_VAL 0xffff0001
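+/* Vendor ID 0x0001 with all other bytes set to 1, per PCIe r4.0, sec 2.3.2 */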
+
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
+ struct {
+ phys_addr_t match;
+ phys_addr_t remap;
+ phys_addr_t mask;
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
struct irq_chip msi_bottom_irq_chip;
@@ -366,9 +417,39 @@ err:
dev_err(dev, "link never came up\n");
}
+/*
+ * Set up a PCIe address window register, which can be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+ phys_addr_t match, phys_addr_t remap,
+ phys_addr_t mask, u32 actions)
+{
+ advk_writel(pcie, OB_WIN_ENABLE |
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
+ int i;
/* Enable TX */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
@@ -447,15 +528,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+ /*
+ * Enable AXI address window location generation:
+ * When enabled, the default outbound window configuration
+ * (Default User Field: 0xD0074CFC) is used for transparent
+ * address translation of outbound transactions. PCIe address
+ * windows are then not required for transparent memory access,
+ * as long as the default outbound window configuration is set
+ * for memory access.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Bypass the address window mapping for PIO */
+ /*
+ * Set memory access in the Default User Field so that no PCIe
+ * address window needs to be configured for transparent memory
+ * access.
+ */
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+ /*
+ * Bypass the address window mapping for PIO:
+ * A PIO access already carries all required information over
+ * the AXI interface via the PIO registers, so no address
+ * window is required.
+ */
reg = advk_readl(pcie, PIO_CTRL);
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * Configure PCIe address windows for non-memory and
+ * non-transparent access, since by default PCIe uses
+ * transparent memory access.
+ */
+ for (i = 0; i < pcie->wins_count; i++)
+ advk_pcie_set_ob_win(pcie, i,
+ pcie->wins[i].match, pcie->wins[i].remap,
+ pcie->wins[i].mask, pcie->wins[i].actions);
+
+ /* Disable remaining PCIe outbound windows */
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
advk_pcie_train_link(pcie);
/*
@@ -472,7 +589,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -483,14 +600,70 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
PIO_COMPLETION_STATUS_SHIFT;
- if (!status)
- return;
-
+ /*
+ * According to the HW spec, the PIO status check sequence is:
+ * 1) Even if COMPLETION_STATUS (bits 9:7) indicates success, the
+ * Error Status bit (bit 11) must still be checked; only when that
+ * bit indicates no error is the operation successful.
+ * 2) The Unsupported Request (1) value of COMPLETION_STATUS (bits
+ * 9:7) means an error only for a PIO write; a PIO read succeeds
+ * with a read value of 0xFFFFFFFF.
+ * 3) The Completion Retry Status (CRS) value of COMPLETION_STATUS
+ * (bits 9:7) means an error only for a PIO write; a PIO read
+ * succeeds with a read value of 0xFFFF0001.
+ * 4) The Completer Abort (CA) value of COMPLETION_STATUS (bits 9:7)
+ * means an error for both PIO read and PIO write operations.
+ * 5) Other errors are reported as 'unknown'.
+ */
switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
+ if (allow_crs && val) {
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is enabled:
+ * For a Configuration Read Request that includes both
+ * bytes of the Vendor ID field of a device Function's
+ * Configuration Space Header, the Root Complex must
+ * complete the Request to the host by returning a
+ * read-data value of 0001h for the Vendor ID field and
+ * all '1's for any additional bytes included in the
+ * request.
+ *
+ * So CRS in this case is not an error status.
+ */
+ *val = CFG_RD_CRS_VAL;
+ strcomp_status = NULL;
+ break;
+ }
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * If CRS Software Visibility is enabled: For a Configuration
+ * Write Request or for any other Configuration Read Request,
+ * the Root Complex must re-issue the Configuration Request as
+ * a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+ * To simplify implementation do not re-issue the Configuration
+ * Request and complete the Request as a failed transaction.
+ */
strcomp_status = "CRS";
break;
case PIO_COMPLETION_STATUS_CA:
@@ -501,6 +674,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
break;
}
+ if (!strcomp_status)
+ return 0;
+
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
@@ -508,6 +684,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return -EFAULT;
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
@@ -545,6 +723,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+ *value |= PCI_EXP_RTCAP_CRSVIS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -626,6 +805,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
+ int ret;
bridge->conf.vendor =
cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
@@ -649,7 +829,15 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- return pci_bridge_emul_init(bridge, 0);
+ /* PCIe config space can be initialized after pci_bridge_emul_init() */
+ ret = pci_bridge_emul_init(bridge, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Indicate support for Completion Retry Status */
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+ return 0;
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -701,6 +889,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
struct advk_pcie *pcie = bus->sysdata;
+ bool allow_crs;
u32 reg;
int ret;
@@ -713,7 +902,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
+ /*
+ * Completion Retry Status can be returned only when reading all 4
+ * bytes of the PCI_VENDOR_ID and PCI_DEVICE_ID registers at once
+ * and when the CRSSVE flag on the Root Bridge is enabled.
+ */
+ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+ PCI_EXP_RTCTL_CRSSVE);
+
if (advk_pcie_pio_is_running(pcie)) {
+ /*
+ * If possible, return Completion Retry Status so that the caller
+ * retries issuing the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
@@ -741,14 +947,25 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0) {
+ /*
+ * If possible, return Completion Retry Status so that the caller
+ * retries issuing the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
- advk_pcie_check_pio_status(pcie);
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ if (ret < 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
- /* Get the read result */
- *val = advk_readl(pcie, PIO_RD_DATA);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
@@ -812,7 +1029,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
return PCIBIOS_SUCCESSFUL;
}
@@ -886,22 +1105,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -985,6 +1210,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct irq_chip *irq_chip;
int ret = 0;
+ raw_spin_lock_init(&pcie->irq_lock);
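+ /*
+ * irq_lock serializes the read-modify-write of PCIE_ISR1_MASK_REG
+ * performed by advk_pcie_irq_mask() and advk_pcie_irq_unmask(),
+ * which can run concurrently for different INTx lines.
+ */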
+
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
@@ -1049,7 +1276,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
u32 isr0_val, isr0_mask, isr0_status;
u32 isr1_val, isr1_mask, isr1_status;
- int i, virq;
+ int i;
isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
@@ -1077,8 +1304,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
PCIE_ISR1_REG);
- virq = irq_find_mapping(pcie->irq_domain, i);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(pcie->irq_domain, i);
}
}
@@ -1162,6 +1388,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
@@ -1172,6 +1399,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ resource_size_t start = entry->res->start;
+ resource_size_t size = resource_size(entry->res);
+ unsigned long type = resource_type(entry->res);
+ u64 win_size;
+
+ /*
+ * The Aardvark hardware also allows configuring a PCIe window
+ * for config type 0 and type 1 mapping, but the driver issues
+ * configuration transfers only via PIO, which does not use the
+ * PCIe window configuration.
+ */
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
+ type != IORESOURCE_IO)
+ continue;
+
+ /*
+ * Skip transparent memory resources. Default outbound access
+ * configuration is set to transparent memory access so it
+ * does not need window configuration.
+ */
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
+ entry->offset == 0)
+ continue;
+
+ /*
+ * The n-th PCIe window is configured by the tuple (match, remap,
+ * mask): an access to address A uses this window if A matches
+ * 'match' under the given mask. Every PCIe window size must
+ * therefore be a power of two and every start address must be
+ * aligned to the window size. The minimal size is 64 KiB because
+ * the lower 16 bits of the mask must be zero. The remapped
+ * address may only have bits set that fall within the mask.
+ */
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+ /* Calculate the largest aligned window size */
+ win_size = (1ULL << (fls64(size)-1)) |
+ (start ? (1ULL << __ffs64(start)) : 0);
+ win_size = 1ULL << __ffs64(win_size);
+ if (win_size < 0x10000)
+ break;
+
+ dev_dbg(dev,
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+ pcie->wins_count, (unsigned long long)start,
+ (unsigned long long)start + win_size, type);
+
+ if (type == IORESOURCE_IO) {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+ } else {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+ pcie->wins[pcie->wins_count].match = start;
+ }
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+ break;
+
+ start += win_size;
+ size -= win_size;
+ pcie->wins_count++;
+ }
+
+ if (size > 0) {
+ dev_err(&pcie->pdev->dev,
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
+ (unsigned long long)entry->res->start,
+ (unsigned long long)entry->res->end + 1);
+ return -EINVAL;
+ }
+ }
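+
+ /*
+ * Worked example for the window splitting above (illustrative):
+ * a region at 0x80000000 of size 0xb00000 (11 MiB) is covered by
+ * three windows of 0x800000, 0x200000 and 0x100000 bytes. Each
+ * step picks the largest power of two that both fits in the
+ * remaining size and keeps the current start address aligned.
+ */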
+
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
@@ -1252,6 +1553,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ int i;
pci_lock_rescan_remove();
pci_stop_root_bus(bridge->bus);
@@ -1261,6 +1563,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
+ /* Disable outbound address windows mapping */
+ for (i = 0; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
return 0;
}
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index aefef1986201..88980a44461d 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -314,7 +314,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
for (i = 0; i < 4; i++) {
if ((irq_stat & BIT(i)) == 0)
continue;
- generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+ generic_handle_domain_irq(p->irqdomain, i);
}
chained_irq_exit(irqchip, desc);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index a53bd8728d0d..eaec915ffe62 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
@@ -64,6 +65,7 @@ enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
+ PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), /* WS2022 */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -73,6 +75,7 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_4,
PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
@@ -122,6 +125,8 @@ enum pci_message_type {
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
+ PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
+ PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
PCI_MESSAGE_MAXIMUM
};
@@ -235,6 +240,21 @@ struct hv_msi_desc2 {
u16 processor_array[32];
} __packed;
+/*
+ * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
+ * Everything is the same as in 'hv_msi_desc2' except that the 'vector'
+ * field is wider, to support bigger vector values such as LPI vectors
+ * on ARM.
+ */
+struct hv_msi_desc3 {
+ u32 vector;
+ u8 delivery_mode;
+ u8 reserved;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
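+
+/*
+ * Layout note: compared with hv_msi_desc2, 'vector' is widened from u8
+ * to u32 and a 'reserved' byte keeps the following u16 fields aligned;
+ * everything from 'vector_count' onwards is unchanged.
+ */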
+
/**
* struct tran_int_desc
* @reserved: unused, padding
@@ -383,6 +403,12 @@ struct pci_create_interrupt2 {
struct hv_msi_desc2 int_desc;
} __packed;
+struct pci_create_interrupt3 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc3 int_desc;
+} __packed;
+
struct pci_delete_interrupt {
struct pci_message message_type;
union win_slot_encoding wslot;
@@ -448,7 +474,13 @@ enum hv_pcibus_state {
};
struct hv_pcibus_device {
+#ifdef CONFIG_X86
struct pci_sysdata sysdata;
+#elif defined(CONFIG_ARM64)
+ struct pci_config_window sysdata;
+#endif
+ struct pci_host_bridge *bridge;
+ struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
@@ -464,8 +496,6 @@ struct hv_pcibus_device {
spinlock_t device_list_lock; /* Protect lists below */
void __iomem *cfg_addr;
- struct list_head resources_for_children;
-
struct list_head children;
struct list_head dr_list;
@@ -1328,6 +1358,15 @@ static u32 hv_compose_msi_req_v1(
return sizeof(*int_pkt);
}
+/*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+{
+ return cpumask_first_and(affinity, cpu_online_mask);
+}
+
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
u32 slot, u8 vector)
@@ -1339,12 +1378,27 @@ static u32 hv_compose_msi_req_v2(
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
- /*
- * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
- * by subsequent retarget in hv_irq_unmask().
- */
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v3(
+ struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+ u32 slot, u32 vector)
+{
+ int cpu;
+
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.reserved = 0;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1379,6 +1433,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
union {
struct pci_create_interrupt v1;
struct pci_create_interrupt2 v2;
+ struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
@@ -1426,6 +1481,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
cfg->vector);
break;
+ case PCI_PROTOCOL_VERSION_1_4:
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
+ dest,
+ hpdev->desc.win_slot.slot,
+ cfg->vector);
+ break;
+
default:
/* As we only negotiate protocol versions known to this driver,
* this path should never hit. However, it is not a hot
@@ -1566,7 +1628,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.handler = handle_edge_irq;
hbus->msi_info.handler_name = "edge";
hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
+ hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
&hbus->msi_info,
x86_vector_domain);
if (!hbus->irq_domain) {
@@ -1575,6 +1637,8 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
return -ENODEV;
}
+ dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
+
return 0;
}
@@ -1797,7 +1861,7 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
- hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
name, NULL);
if (IS_ERR(hpdev->pci_slot)) {
pr_warn("pci_create slot %s failed\n", name);
@@ -1827,7 +1891,7 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
struct pci_dev *dev;
- struct pci_bus *bus = hbus->pci_bus;
+ struct pci_bus *bus = hbus->bridge->bus;
struct hv_pci_dev *hv_dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -1850,21 +1914,22 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
*/
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
- /* Register the device */
- hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
- 0, /* bus number is always zero */
- &hv_pcifront_ops,
- &hbus->sysdata,
- &hbus->resources_for_children);
- if (!hbus->pci_bus)
- return -ENODEV;
+ int error;
+ struct pci_host_bridge *bridge = hbus->bridge;
+
+ bridge->dev.parent = &hbus->hdev->device;
+ bridge->sysdata = &hbus->sysdata;
+ bridge->ops = &hv_pcifront_ops;
+
+ error = pci_scan_root_bus_bridge(bridge);
+ if (error)
+ return error;
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_numa_node(hbus);
- pci_bus_assign_resources(hbus->pci_bus);
+ pci_bus_assign_resources(bridge->bus);
hv_pci_assign_slots(hbus);
- pci_bus_add_devices(hbus->pci_bus);
+ pci_bus_add_devices(bridge->bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
@@ -2127,7 +2192,7 @@ static void pci_devices_present_work(struct work_struct *work)
* because there may have been changes.
*/
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
+ pci_scan_child_bus(hbus->bridge->bus);
hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
@@ -2295,11 +2360,11 @@ static void hv_eject_device_work(struct work_struct *work)
/*
* Ejection can come before or after the PCI bus has been set up, so
* attempt to find it and tear down the bus state, if it exists. This
- * must be done without constructs like pci_domain_nr(hbus->pci_bus)
- * because hbus->pci_bus may not exist yet.
+ * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
+ * because hbus->bridge->bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -2662,8 +2727,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->low_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
}
if (hbus->high_mmio_space) {
@@ -2682,8 +2746,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->high_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
}
return 0;
@@ -3002,6 +3065,7 @@ static void hv_put_dom_num(u16 dom)
static int hv_pci_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
u16 dom_req, dom;
char *name;
@@ -3014,6 +3078,10 @@ static int hv_pci_probe(struct hv_device *hdev,
*/
BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
+ bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
+ if (!bridge)
+ return -ENOMEM;
+
/*
* With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
* alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
@@ -3035,6 +3103,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+
+ hbus->bridge = bridge;
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3066,17 +3136,19 @@ static int hv_pci_probe(struct hv_device *hdev,
"PCI dom# 0x%hx has collision, using 0x%hx",
dom_req, dom);
+ hbus->bridge->domain_nr = dom;
+#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+#endif
hbus->hdev = hdev;
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
- INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
spin_lock_init(&hbus->retarget_msi_interrupt_lock);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
- hbus->sysdata.domain);
+ hbus->bridge->domain_nr);
if (!hbus->wq) {
ret = -ENOMEM;
goto free_dom;
@@ -3113,9 +3185,9 @@ static int hv_pci_probe(struct hv_device *hdev,
goto unmap;
}
- hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
+ hbus->fwnode = irq_domain_alloc_named_fwnode(name);
kfree(name);
- if (!hbus->sysdata.fwnode) {
+ if (!hbus->fwnode) {
ret = -ENOMEM;
goto unmap;
}
@@ -3193,7 +3265,7 @@ exit_d0:
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
unmap:
iounmap(hbus->cfg_addr);
free_config:
@@ -3203,7 +3275,7 @@ close:
destroy_wq:
destroy_workqueue(hbus->wq);
free_dom:
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
kfree(hbus);
return ret;
@@ -3295,9 +3367,9 @@ static int hv_pci_remove(struct hv_device *hdev)
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
- pci_stop_root_bus(hbus->pci_bus);
+ pci_stop_root_bus(hbus->bridge->bus);
hv_pci_remove_slots(hbus);
- pci_remove_root_bus(hbus->pci_bus);
+ pci_remove_root_bus(hbus->bridge->bus);
pci_unlock_rescan_remove();
}
@@ -3307,12 +3379,11 @@ static int hv_pci_remove(struct hv_device *hdev)
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
- pci_free_resource_list(&hbus->resources_for_children);
hv_pci_free_bridge_windows(hbus);
irq_domain_remove(hbus->irq_domain);
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
kfree(hbus);
return ret;
@@ -3390,7 +3461,7 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
*/
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
- pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
+ pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}
static int hv_pci_resume(struct hv_device *hdev)
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index c979229a6d0d..cb0aa65d6934 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -372,11 +372,6 @@ struct tegra_pcie_port {
struct gpio_desc *reset_gpio;
};
-struct tegra_pcie_bus {
- struct list_head list;
- unsigned int nr;
-};
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -764,7 +759,7 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
- const char *err_msg[] = {
+ static const char * const err_msg[] = {
"Unknown",
"AXI slave error",
"AXI decode error",
@@ -1553,12 +1548,10 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
while (reg) {
unsigned int offset = find_first_bit(&reg, 32);
unsigned int index = i * 32 + offset;
- unsigned int irq;
+ int ret;
- irq = irq_find_mapping(msi->domain->parent, index);
- if (irq) {
- generic_handle_irq(irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/*
* that's weird who triggered this?
* just clear it
@@ -2193,13 +2186,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base))
- return PTR_ERR(rp->base);
+ if (IS_ERR(rp->base)) {
+ err = PTR_ERR(rp->base);
+ goto err_node_put;
+ }
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
if (!label) {
- dev_err(dev, "failed to create reset GPIO label\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_node_put;
}
/*
@@ -2217,7 +2212,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
} else {
dev_err(dev, "failed to get reset GPIO: %ld\n",
PTR_ERR(rp->reset_gpio));
- return PTR_ERR(rp->reset_gpio);
+ err = PTR_ERR(rp->reset_gpio);
+ goto err_node_put;
}
}
@@ -2548,7 +2544,7 @@ static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
if (list_empty(&pcie->ports))
return NULL;
- seq_printf(s, "Index Status\n");
+ seq_puts(s, "Index Status\n");
return seq_list_start(&pcie->ports, *pos);
}
@@ -2585,16 +2581,16 @@ static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%2u ", port->index);
if (up)
- seq_printf(s, "up");
+ seq_puts(s, "up");
if (active) {
if (up)
- seq_printf(s, ", ");
+ seq_puts(s, ", ");
- seq_printf(s, "active");
+ seq_puts(s, "active");
}
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
return 0;
}
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 1c34c897a7e2..b7a8e062fcc5 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -291,8 +291,7 @@ static void xgene_msi_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xgene_msi_group *msi_groups;
struct xgene_msi *xgene_msi;
- unsigned int virq;
- int msir_index, msir_val, hw_irq;
+ int msir_index, msir_val, hw_irq, ret;
u32 intr_index, grp_select, msi_grp;
chained_irq_enter(chip, desc);
@@ -330,10 +329,8 @@ static void xgene_msi_isr(struct irq_desc *desc)
* CPU0
*/
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
- WARN_ON(!virq);
- if (virq != 0)
- generic_handle_irq(virq);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ WARN_ON_ONCE(ret);
msir_val &= ~(1 << intr_index);
}
grp_select &= ~(1 << msir_index);
@@ -451,7 +448,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xgene_msi->msi_regs)) {
- dev_err(&pdev->dev, "no reg space\n");
rc = PTR_ERR(xgene_msi->msi_regs);
goto error;
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 98aa1dccc6e6..7b1d3ebc34ec 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -55,7 +55,7 @@ static void altera_msi_isr(struct irq_desc *desc)
struct altera_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
msi = irq_desc_get_handler_data(desc);
@@ -65,11 +65,9 @@ static void altera_msi_isr(struct irq_desc *desc)
/* Dummy read from vector to clear the interrupt */
readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(&msi->pdev->dev, "unexpected MSI\n");
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
+ dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n");
}
}
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 523bd928b380..2513e9363236 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -646,7 +646,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
struct device *dev;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
@@ -658,11 +658,9 @@ static void altera_pcie_isr(struct irq_desc *desc)
/* clear interrupts */
cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
- virq = irq_find_mapping(pcie->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", bit);
+ ret = generic_handle_domain_irq(pcie->irq_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 08bc788d9422..cc30215f5a43 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -476,7 +476,7 @@ static struct msi_domain_info brcm_msi_domain_info = {
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long status, virq;
+ unsigned long status;
struct brcm_msi *msi;
struct device *dev;
u32 bit;
@@ -489,10 +489,9 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)
status >>= msi->legacy_shift;
for_each_set_bit(bit, &status, msi->nr) {
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ int ret;
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
dev_dbg(dev, "unexpected MSI\n");
}
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index 56b8ee7bf330..f918c713afb0 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
pcie->mem.name = "PCIe MEM space";
pcie->mem.flags = IORESOURCE_MEM;
- pci_add_resource(&resources, &pcie->mem);
+ pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ if (ret)
+ return ret;
pcie->map_irq = iproc_pcie_bcma_map_irq;
- ret = iproc_pcie_setup(pcie, &resources);
- if (ret) {
- dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
- return ret;
- }
-
bcma_set_drvdata(bdev, pcie);
- return 0;
+
+ return iproc_pcie_setup(pcie, &bridge->windows);
}
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
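The iproc-bcma conversion above drops the function-local resource list: the memory window is attached straight to bridge->windows and requested with the devm-managed helper, so the error path no longer needs pci_free_resource_list(). A sketch of that probe-time idiom, under the assumption of a single MEM window (names are illustrative):

#include <linux/pci.h>

static int my_add_window(struct device *dev, struct pci_host_bridge *bridge,
			 struct resource *mem)
{
	/* Attach the window to the bridge instead of a local LIST_HEAD. */
	pci_add_resource(&bridge->windows, mem);

	/* devm-managed request: nothing to free manually on failure. */
	return devm_request_pci_bus_resources(dev, &bridge->windows);
}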
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 35a82124a126..757b7fbcdc59 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -326,7 +326,6 @@ static void iproc_msi_handler(struct irq_desc *desc)
struct iproc_msi *msi;
u32 eq, head, tail, nr_events;
unsigned long hwirq;
- int virq;
chained_irq_enter(chip, desc);
@@ -362,8 +361,7 @@ static void iproc_msi_handler(struct irq_desc *desc)
/* process all outstanding events */
while (nr_events--) {
hwirq = decode_msi_hwirq(msi, eq, head);
- virq = irq_find_mapping(msi->inner_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->inner_domain, hwirq);
head++;
head %= EQ_LEN;
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index f3aeb8d4eaca..17c59b0d6978 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -645,7 +645,6 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
{
struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
unsigned long msi_enable, msi_status;
- unsigned int virq;
irq_hw_number_t bit, hwirq;
msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
@@ -659,8 +658,7 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
- virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->msi_bottom_domain, hwirq);
}
} while (true);
}
@@ -670,18 +668,15 @@ static void mtk_pcie_irq_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- unsigned int virq;
irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
chained_irq_enter(irqchip, desc);
status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
- PCIE_INTX_SHIFT) {
- virq = irq_find_mapping(port->intx_domain,
- irq_bit - PCIE_INTX_SHIFT);
- generic_handle_irq(virq);
- }
+ PCIE_INTX_SHIFT)
+ generic_handle_domain_irq(port->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
irq_bit = PCIE_MSI_SHIFT;
for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 25bee693834f..2f3f974977a3 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -14,6 +14,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -23,6 +24,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
@@ -207,6 +209,7 @@ struct mtk_pcie_port {
* struct mtk_pcie - PCIe host information
* @dev: pointer to PCIe device
* @base: IO mapped register base
+ * @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
* @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
@@ -215,6 +218,7 @@ struct mtk_pcie_port {
struct mtk_pcie {
struct device *dev;
void __iomem *base;
+ struct regmap *cfg;
struct clk *free_ck;
struct list_head ports;
@@ -602,7 +606,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- u32 virq;
u32 bit = INTX_SHIFT;
chained_irq_enter(irqchip, desc);
@@ -612,9 +615,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
- virq = irq_find_mapping(port->irq_domain,
- bit - INTX_SHIFT);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->irq_domain,
+ bit - INTX_SHIFT);
}
}
@@ -623,10 +625,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
- for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- virq = irq_find_mapping(port->inner_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
}
/* Clear MSI interrupt status */
writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
@@ -650,7 +650,11 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- port->irq = platform_get_irq(pdev, port->slot);
+ if (of_find_property(dev->of_node, "interrupt-names", NULL))
+ port->irq = platform_get_irq_byname(pdev, "pcie_irq");
+ else
+ port->irq = platform_get_irq(pdev, port->slot);
+
if (port->irq < 0)
return port->irq;
@@ -682,6 +686,10 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
val |= PCIE_CSR_LTSSM_EN(port->slot) |
PCIE_CSR_ASPM_L1_EN(port->slot);
writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ } else if (pcie->cfg) {
+ val = PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
}
/* Assert all reset signals */
@@ -985,6 +993,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
+ struct device_node *cfg_node;
int err;
/* get shared registers, which are optional */
@@ -995,6 +1004,14 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return PTR_ERR(pcie->base);
}
+ cfg_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,generic-pciecfg");
+ if (cfg_node) {
+ pcie->cfg = syscon_node_to_regmap(cfg_node);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+ }
+
pcie->free_ck = devm_clk_get(dev, "free_ck");
if (IS_ERR(pcie->free_ck)) {
if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
@@ -1027,22 +1044,27 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node, *child;
struct mtk_pcie_port *port, *tmp;
- int err;
+ int err, slot;
+
+ slot = of_get_pci_domain_nr(dev->of_node);
+ if (slot < 0) {
+ for_each_available_child_of_node(node, child) {
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ dev_err(dev, "failed to get devfn: %d\n", err);
+ goto error_put_node;
+ }
- for_each_available_child_of_node(node, child) {
- int slot;
+ slot = PCI_SLOT(err);
- err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", err);
- goto error_put_node;
+ err = mtk_pcie_parse_port(pcie, child, slot);
+ if (err)
+ goto error_put_node;
}
-
- slot = PCI_SLOT(err);
-
- err = mtk_pcie_parse_port(pcie, child, slot);
+ } else {
+ err = mtk_pcie_parse_port(pcie, node, slot);
if (err)
- goto error_put_node;
+ return err;
}
err = mtk_pcie_subsys_powerup(pcie);
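The MediaTek driver gains an optional path to the shared PCIE_SYS_CFG_V2 register through a "mediatek,generic-pciecfg" syscon, used when the subsys registers are not mapped locally; it also derives the slot from the PCI domain number when the node carries one instead of per-port children. A sketch of the syscon lookup-and-update idiom (the compatible string comes from the patch; everything else is illustrative):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int my_set_cfg_bits(u32 reg, u32 bits)
{
	struct device_node *np;
	struct regmap *map;

	np = of_find_compatible_node(NULL, NULL, "mediatek,generic-pciecfg");
	if (!np)
		return -ENODEV;		/* node is optional */

	map = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Read-modify-write only the bits this port owns. */
	return regmap_update_bits(map, reg, bits, bits);
}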
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index fdab8202ae5d..329f930d17aa 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -412,16 +412,14 @@ static void mc_handle_msi(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
bit);
}
@@ -570,17 +568,15 @@ static void mc_handle_intx(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
status >>= PM_MSI_INT_INTX_SHIFT;
for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(port->intx_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
bit);
}
@@ -745,7 +741,7 @@ static void mc_handle_event(struct irq_desc *desc)
events = get_events(port);
for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_irq(irq_find_mapping(port->event_domain, bit));
+ generic_handle_domain_irq(port->event_domain, bit);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index b4a288e24aaf..aa1cf24a5a72 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -159,7 +159,7 @@ static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
return 0;
}
-static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -195,7 +195,7 @@ static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
@@ -246,7 +246,7 @@ static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
return 0;
}
-static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -259,7 +259,8 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -272,7 +273,7 @@ static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
return 0;
}
-static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -285,7 +286,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
-static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr, size_t size)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -322,7 +323,7 @@ static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -403,7 +404,7 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
return 0;
}
-static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -451,7 +452,7 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
};
static const struct pci_epc_features*
-rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rcar_pcie_epc_features;
}
@@ -492,9 +493,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
pcie->dev = dev;
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
goto err_pm_disable;
}
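The probe switch from pm_runtime_get_sync() to pm_runtime_resume_and_get() is about error-path bookkeeping: the older call leaves the usage count raised even when the resume fails, whereas the newer one drops it itself, so the caller can unwind without a compensating put. A minimal sketch, assuming a probe that only needs the device powered (names are illustrative):

#include <linux/pm_runtime.h>

static int my_probe_power_up(struct device *dev)
{
	int err;

	pm_runtime_enable(dev);

	/* On failure, the usage count has already been dropped for us. */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		pm_runtime_disable(dev);
		return err;
	}

	return 0;
}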
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 765cf2b45e24..8f3131844e77 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -13,12 +13,14 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -41,6 +43,21 @@ struct rcar_msi {
int irq2;
};
+#ifdef CONFIG_ARM
+/*
+ * Here we keep a static copy of the remapped PCIe controller address.
+ * This is only used on aarch32 systems, all of which have one single
+ * PCIe controller, to provide quick access to the PCIe controller in
+ * the L1 link state fixup function, called from the ARM fault handler.
+ */
+static void __iomem *pcie_base;
+/*
+ * Static copy of bus clock pointer, so we can check whether the clock
+ * is enabled or not.
+ */
+static struct clk *pcie_bus_clk;
+#endif
+
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -486,12 +503,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
while (reg) {
unsigned int index = find_first_bit(&reg, 32);
- unsigned int msi_irq;
+ int ret;
- msi_irq = irq_find_mapping(msi->domain->parent, index);
- if (msi_irq) {
- generic_handle_irq(msi_irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
@@ -776,6 +791,12 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
}
host->msi.irq2 = i;
+#ifdef CONFIG_ARM
+ /* Cache static copy for L1 link state fixup hook on aarch32 */
+ pcie_base = pcie->base;
+ pcie_bus_clk = host->bus_clk;
+#endif
+
return 0;
err_irq2:
@@ -1031,4 +1052,67 @@ static struct platform_driver rcar_pcie_driver = {
},
.probe = rcar_pcie_probe,
};
+
+#ifdef CONFIG_ARM
+static DEFINE_SPINLOCK(pmsr_lock);
+static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+ spin_lock_irqsave(&pmsr_lock, flags);
+
+ if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
+ ret = 1;
+ goto unlock_exit;
+ }
+
+ pmsr = readl(pcie_base + PMSR);
+
+ /*
+ * Test if the PCIe controller received PM_ENTER_L1 DLLP and
+ * the PCIe controller is not in L1 link state. If true, apply
+ * fix, which will put the controller into L1 link state, from
+ * which it can return to L0s/L0 on its own.
+ */
+ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+unlock_exit:
+ spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+}
+
+static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
+ { .compatible = "renesas,pcie-r8a7779" },
+ { .compatible = "renesas,pcie-r8a7790" },
+ { .compatible = "renesas,pcie-r8a7791" },
+ { .compatible = "renesas,pcie-rcar-gen2" },
+ {},
+};
+
+static int __init rcar_pcie_init(void)
+{
+ if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
+#ifdef CONFIG_ARM_LPAE
+ hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "asynchronous external abort");
+#else
+ hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+#endif
+ }
+
+ return platform_driver_register(&rcar_pcie_driver);
+}
+device_initcall(rcar_pcie_init);
+#else
builtin_platform_driver(rcar_pcie_driver);
+#endif
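The new R-Car fixup runs from the ARM fault handler with a raw spinlock held and interrupts disabled, so it must not sleep; readl_poll_timeout_atomic() is the busy-waiting variant of the iopoll helpers that is safe in that context. A sketch of the atomic-context poll (the timing values mirror the hunk; the wrapper itself is illustrative):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Sample PMSR every 10 us, for at most 1 ms, until L1FAEG asserts. */
static int my_wait_l1_entry(void __iomem *base, u32 pmsr_off, u32 l1faeg)
{
	u32 val;

	return readl_poll_timeout_atomic(base + pmsr_off, val,
					 val & l1faeg, 10, 1000);
}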
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
index d4c698b5f821..9bb125db85c6 100644
--- a/drivers/pci/controller/pcie-rcar.h
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -85,6 +85,13 @@
#define LTSMDIS BIT(31)
#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
+#define L1FAEG BIT(31)
+#define PMEL1RX BIT(23)
+#define PMSTATE GENMASK(18, 16)
+#define PMSTATE_L1 (3 << 16)
+#define PMCTLR 0x011060
+#define L1IATN BIT(31)
+
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
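The new PMSR definitions follow the usual mask conventions: PMSTATE spans bits 18:16 and PMSTATE_L1 is the value 3 pre-shifted into that field, which is why the handler compares (pmsr & PMSTATE) != PMSTATE_L1 directly. With <linux/bitfield.h> the same test can be written against the unshifted field value; a hedged alternative, not what the driver does:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define MY_PMSTATE	GENMASK(18, 16)
#define MY_PMSTATE_L1	3	/* field value, unshifted */

static bool my_link_in_l1(u32 pmsr)
{
	return FIELD_GET(MY_PMSTATE, pmsr) == MY_PMSTATE_L1;
}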
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 7631dc3961c1..5fb9ce6e536e 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -122,7 +122,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
-static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -159,7 +159,7 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -227,7 +227,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -256,7 +256,7 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
-static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
@@ -284,7 +284,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -308,7 +308,7 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
u8 multi_msg_cap)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -329,7 +329,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -471,7 +471,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -510,7 +510,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
};
static const struct pci_epc_features*
-rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rockchip_pcie_epc_features;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 78d04ac29cd5..c52316d0bfd2 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -517,7 +517,7 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
struct device *dev = rockchip->dev;
u32 reg;
u32 hwirq;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
@@ -528,10 +528,8 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
hwirq = ffs(reg) - 1;
reg &= ~BIT(hwirq);
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+ if (ret)
dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
}
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 67937facd90c..95426df03200 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -222,7 +222,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
for_each_set_bit(i, &val, PCI_NUM_INTX)
- generic_handle_irq(irq_find_mapping(port->intx_domain, i));
+ generic_handle_domain_irq(port->intx_domain, i);
chained_irq_exit(chip, desc);
}
@@ -282,7 +282,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
for_each_set_bit(i, &val, 32)
- generic_handle_irq(irq_find_mapping(port->cpm_domain, i));
+ generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
/*
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 8689311c5ef6..a72b4f9a2b00 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -169,6 +170,7 @@ struct nwl_pcie {
u8 last_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -318,18 +320,14 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
struct nwl_pcie *pcie;
unsigned long status;
u32 bit;
- u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &status, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -340,16 +338,13 @@ static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
struct nwl_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
msi = &pcie->msi;
while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
for_each_set_bit(bit, &status, 32) {
nwl_bridge_writel(pcie, 1 << bit, status_reg);
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, bit);
}
}
}
@@ -823,6 +818,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 14001febf59a..aa9bdcebc838 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -385,7 +385,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
- unsigned int irq;
+ struct irq_domain *domain;
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
@@ -399,19 +399,18 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- irq = irq_find_mapping(port->msi_domain->parent, val);
+ domain = port->msi_domain->parent;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- irq = irq_find_mapping(port->leg_domain, val);
+ domain = port->leg_domain;
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
- if (irq)
- generic_handle_irq(irq);
+ generic_handle_domain_irq(domain, val);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e3fcdfec58b3..a5987e52700e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
@@ -447,6 +448,56 @@ static struct pci_ops vmd_ops = {
.write = vmd_pci_write,
};
+#ifdef CONFIG_ACPI
+static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
+{
+ struct pci_host_bridge *bridge;
+ u32 busnr, addr;
+
+ if (pci_dev->bus->ops != &vmd_ops)
+ return NULL;
+
+ bridge = pci_find_host_bridge(pci_dev->bus);
+ busnr = pci_dev->bus->number - bridge->bus->number;
+ /*
+ * The address computation below is only applicable to relative bus
+ * numbers below 32.
+ */
+ if (busnr > 31)
+ return NULL;
+
+ addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;
+
+ dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
+ addr);
+
+ return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
+ false);
+}
+
+static bool hook_installed;
+
+static void vmd_acpi_begin(void)
+{
+ if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
+ return;
+
+ hook_installed = true;
+}
+
+static void vmd_acpi_end(void)
+{
+ if (!hook_installed)
+ return;
+
+ pci_acpi_clear_companion_lookup_hook();
+ hook_installed = false;
+}
+#else
+static inline void vmd_acpi_begin(void) { }
+static inline void vmd_acpi_end(void) { }
+#endif /* CONFIG_ACPI */
+
static void vmd_attach_resources(struct vmd_dev *vmd)
{
vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
@@ -747,6 +798,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ vmd_acpi_begin();
+
pci_scan_child_bus(vmd->bus);
pci_assign_unassigned_bus_resources(vmd->bus);
@@ -760,6 +813,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
+ vmd_acpi_end();
+
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
"domain"), "Can't create symlink to domain\n");
return 0;
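vmd_acpi_find_companion() reconstructs the ACPI address form the firmware uses for devices behind VMD: the bus number relative to the VMD root goes in bits 28:24, the devfn in bits 23:16, and the low half is the fixed pattern 0x8000FFFF, which is why relative bus numbers above 31 are rejected. A worked example of the encoding (pure arithmetic, matching the expression in the hunk):

#include <linux/types.h>

static u32 my_vmd_adr(u32 busnr, u32 devfn)
{
	/* busnr must be <= 31 so it fits below the fixed high bits. */
	return (busnr << 24) | (devfn << 16) | 0x8000FFFFU;
}

/* my_vmd_adr(1, 0) == 0x8100FFFF: relative bus 1, device 0, function 0. */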
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index bce274d02dcf..8b4756159f15 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -87,6 +87,7 @@ struct epf_ntb {
struct epf_ntb_epc {
u8 func_no;
+ u8 vfunc_no;
bool linkup;
bool is_msix;
int msix_bar;
@@ -143,14 +144,15 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
struct pci_epc *epc;
+ u8 func_no, vfunc_no;
bool is_msix;
- u8 func_no;
int ret;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
is_msix = ntb_epc->is_msix;
ctrl = ntb_epc->reg;
if (link_up)
@@ -158,7 +160,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
else
ctrl->link_status &= ~LINK_STATUS_UP;
irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
- ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to raise Link Up IRQ\n",
@@ -238,10 +240,10 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
u64 addr, size;
int ret = 0;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -267,8 +269,9 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&epc->dev,
"%s intf: Failed to map memory window %d address\n",
@@ -296,8 +299,8 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -311,8 +314,9 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -385,8 +389,8 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
struct epf_ntb_ctrl *peer_ctrl;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -400,8 +404,9 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
db_entry_size, &db_data, &db_offset);
if (ret) {
dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
@@ -491,10 +496,10 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
u32 db_entry_size, msg_data;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
size_t align;
u64 msg_addr;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -512,12 +517,13 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_entry_size = peer_ctrl->db_entry_size;
for (i = 0; i < db_count; i++) {
msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
msg_data = msix_tbl[i].msg_data;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
db_entry_size);
if (ret) {
dev_err(&epc->dev,
@@ -586,8 +592,8 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -597,8 +603,9 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -728,14 +735,15 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -775,9 +783,9 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
struct pci_epf_bar *peer_epf_bar, *epf_bar;
enum pci_barno peer_barno, barno;
u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
dev = &ntb->epf->dev;
@@ -790,6 +798,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
peer_spad_offset = peer_ntb_epc->reg->spad_offset;
@@ -798,7 +807,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
pci_epc_interface_string(type));
@@ -842,14 +851,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -886,10 +896,10 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct epf_ntb *ntb;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
ntb = ntb_epc->epf_ntb;
@@ -897,10 +907,11 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
pci_epc_interface_string(ntb_epc->type));
@@ -1214,17 +1225,18 @@ static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
struct pci_epf_bar *epf_bar;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
}
@@ -1263,10 +1275,10 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
bool msix_capable, msi_capable;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
u32 db_count;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1282,6 +1294,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
@@ -1293,7 +1306,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
epc = ntb_epc->epc;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, func_no, db_count);
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
if (ret) {
dev_err(dev, "%s intf: MSI configuration failed\n",
pci_epc_interface_string(type));
@@ -1302,7 +1315,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, func_no, db_count,
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
ntb_epc->msix_bar,
ntb_epc->msix_table_offset);
if (ret) {
@@ -1423,11 +1436,11 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
u32 num_mws, db_count;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
size_t align;
int ret, i;
- u8 func_no;
u64 size;
ntb_epc = ntb->epc[type];
@@ -1437,6 +1450,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
num_mws = ntb->num_mws;
db_count = ntb->db_count;
@@ -1464,7 +1478,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: DoorBell BAR set failed\n",
pci_epc_interface_string(type));
@@ -1536,9 +1550,9 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
dev = &ntb->epf->dev;
@@ -1547,6 +1561,7 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
return -ENOMEM;
epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
epf_bar = epf->bar;
@@ -1558,11 +1573,12 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
ntb_epc->linkup = false;
ntb_epc->epc = epc;
ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
ntb_epc->type = type;
ntb_epc->epf_bar = epf_bar;
ntb_epc->epf_ntb = ntb;
- epc_features = pci_epc_get_features(epc, func_no);
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!epc_features)
return -EINVAL;
ntb_epc->epc_features = epc_features;
@@ -1702,10 +1718,10 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1713,6 +1729,7 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
dev = &epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
if (ret) {
@@ -1742,11 +1759,13 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
goto err_db_mw_bar_init;
}
- ret = pci_epc_write_header(epc, func_no, epf->header);
- if (ret) {
- dev_err(dev, "%s intf: Configuration header write failed\n",
- pci_epc_interface_string(type));
- goto err_write_header;
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
}
INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
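Throughout the endpoint-function drivers, every pci_epc_*() call gains a vfunc_no argument: 0 keeps addressing the physical function, values from 1 select a virtual function within it, and the configuration-header write is performed only for vfunc_no <= 1 since the remaining virtual functions share it. The EPC core rejects vfunc numbers the controller cannot back before dispatching, with a guard like the sketch below (epc->max_vfs is the per-PF limit array this series introduces; the wrapper is illustrative):

#include <linux/pci-epc.h>

static bool my_vfunc_ok(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (vfunc_no == 0)
		return true;	/* the physical function itself */

	return epc->max_vfs && vfunc_no <= epc->max_vfs[func_no];
}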
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d2708ca4bece..90d84d3bc868 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -247,8 +247,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -263,8 +263,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
- ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -291,13 +291,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -331,8 +331,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -386,7 +386,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -419,8 +419,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -479,7 +479,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -501,13 +501,16 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
switch (irq_type) {
case IRQ_TYPE_LEGACY:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -542,7 +545,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@@ -580,22 +584,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- count = pci_epc_get_msi(epc, epf->func_no);
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
- count = pci_epc_get_msix(epc, epf->func_no);
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
goto reset_handler;
}
@@ -618,7 +622,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
}
@@ -650,7 +655,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -674,16 +680,18 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
+ if (epf->vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
}
ret = pci_epf_test_set_bar(epf);
@@ -691,7 +699,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return ret;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
@@ -699,7 +708,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
@@ -832,7 +842,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index f3a8b833b479..999911801877 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -475,6 +475,28 @@ static struct configfs_attribute *pci_epf_attrs[] = {
NULL,
};
+static int pci_epf_vepf_link(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ return pci_epf_add_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_vepf_unlink(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ pci_epf_remove_vepf(epf_pf, epf_vf);
+}
+
static void pci_epf_release(struct config_item *item)
{
struct pci_epf_group *epf_group = to_pci_epf_group(item);
@@ -487,6 +509,8 @@ static void pci_epf_release(struct config_item *item)
}
static struct configfs_item_operations pci_epf_ops = {
+ .allow_link = pci_epf_vepf_link,
+ .drop_link = pci_epf_vepf_unlink,
.release = pci_epf_release,
};
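The configfs hooks above turn virtual-function binding into a symlink operation: creating, inside one endpoint-function directory, a link that points at another function's directory registers the target as a virtual function of the former, and removing the link unwinds it. A sketch of the pairing (pci_epf_add_vepf() and pci_epf_remove_vepf() are the real helpers from this series; the wrappers are illustrative):

#include <linux/pci-epf.h>

/* What the symlink create/remove callbacks boil down to. */
static int my_bind_vf(struct pci_epf *pf, struct pci_epf *vf)
{
	return pci_epf_add_vepf(pf, vf);	/* gives vf a vfunc_no under pf */
}

static void my_unbind_vf(struct pci_epf *pf, struct pci_epf *vf)
{
	pci_epf_remove_vepf(pf, vf);
}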
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index adec9bee72cf..ecbb0fb3b653 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -137,24 +137,29 @@ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
* @epc: the features supported by *this* EPC device will be returned
* @func_no: the features supported by the EPC device specific to the
* endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
*
* Invoke to get the features provided by the EPC which may be
* specific to an endpoint function. Returns pci_epc_features on success
* and NULL for any failures.
*/
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no)
+ u8 func_no, u8 vfunc_no)
{
const struct pci_epc_features *epc_features;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
if (!epc->ops->get_features)
return NULL;
mutex_lock(&epc->lock);
- epc_features = epc->ops->get_features(epc, func_no);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
return epc_features;
@@ -205,13 +210,14 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
* Invoke to raise an legacy, MSI or MSI-X interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
@@ -219,11 +225,14 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->raise_irq)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
mutex_unlock(&epc->lock);
return ret;
@@ -235,6 +244,7 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* MSI data
* @epc: the EPC device which has the MSI capability
* @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: the physical address of the outbound region
* @interrupt_num: the MSI interrupt number
* @entry_size: Size of Outbound address region for each interrupt
@@ -250,21 +260,25 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* physical address (in outbound region) of the other interface to ring
* doorbell.
*/
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
- u8 interrupt_num, u32 entry_size, u32 *msi_data,
- u32 *msi_addr_offset)
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
{
int ret;
if (IS_ERR_OR_NULL(epc))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_msi_irq)
return -EINVAL;
mutex_lock(&epc->lock);
- ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
- entry_size, msi_data, msi_addr_offset);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
mutex_unlock(&epc->lock);
return ret;
@@ -274,22 +288,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts was requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msi)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msi(epc, func_no);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -304,12 +322,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
int ret;
u8 encode_int;
@@ -318,13 +337,16 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
interrupts > 32)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msi)
return 0;
encode_int = order_base_2(interrupts);
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
mutex_unlock(&epc->lock);
return ret;
@@ -334,22 +356,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
* @epc: the EPC device to which MSI-X interrupts were requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msix)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msix(epc, func_no);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -362,15 +388,16 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
* @epc: the EPC device on which MSI-X has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
int ret;
@@ -378,11 +405,15 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
interrupts < 1 || interrupts > 2048)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
mutex_unlock(&epc->lock);
return ret;
@@ -392,22 +423,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->unmap_addr)
return;
mutex_lock(&epc->lock);
- epc->ops->unmap_addr(epc, func_no, phys_addr);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -415,14 +450,15 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map CPU address with PCI address.
*/
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
@@ -430,11 +466,15 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_addr)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
mutex_unlock(&epc->lock);
return ret;
@@ -444,12 +484,13 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -457,11 +498,14 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->clear_bar)
return;
mutex_lock(&epc->lock);
- epc->ops->clear_bar(epc, func_no, epf_bar);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -469,12 +513,13 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -489,11 +534,14 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_bar)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
return ret;
@@ -503,7 +551,8 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -511,7 +560,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *header)
{
int ret;
@@ -519,11 +568,18 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ /* Only Virtual Function #1 has deviceID */
+ if (vfunc_no > 1)
+ return -EINVAL;
+
if (!epc->ops->write_header)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->write_header(epc, func_no, header);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
mutex_unlock(&epc->lock);
return ret;
@@ -548,7 +604,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
u32 func_no;
int ret = 0;
- if (IS_ERR_OR_NULL(epc))
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
return -EINVAL;
if (type == PRIMARY_INTERFACE && epf->epc)
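Taken together, the hunks above thread the new vfunc_no argument through every EPC operation. Below is a minimal sketch of how an endpoint function driver might call the extended API; the function name, the BAR choice, and the MSI count are illustrative assumptions, not part of this diff. Passing vfunc_no = 0 keeps the old physical-function behaviour, while values >= 1 address a virtual function within that physical function.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/*
 * Hypothetical EPF bind step sketched against the vfunc_no-aware EPC
 * API above; it configures the function addressed by epf->func_no /
 * epf->vfunc_no.
 */
static int sketch_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	int ret;

	/* Write the standard config header for this (v)function */
	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret)
		return ret;

	/* Expose BAR 0 so the host can assign address space to it */
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
			      &epf->bar[BAR_0]);
	if (ret)
		return ret;

	/* Request four MSI vectors (an arbitrary example count) */
	return pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 4);
}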
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 502eb79cd551..8aea16380870 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -62,13 +62,20 @@ EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
*/
void pci_epf_unbind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return;
}
mutex_lock(&epf->lock);
- epf->driver->ops->unbind(epf);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
@@ -83,10 +90,14 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct device *dev = &epf->dev;
+ struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
int ret;
if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ dev_WARN(dev, "epf device not bound to driver\n");
return -EINVAL;
}
@@ -94,14 +105,141 @@ int pci_epf_bind(struct pci_epf *epf)
return -EAGAIN;
mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
+ mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+ if (vfunc_no >= BITS_PER_LONG) {
+ mutex_unlock(&epf_pf->lock);
+ return -EINVAL;
+ }
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
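As a usage sketch, the pairing below mirrors what the configfs interface is expected to do when a virtual function is attached to a physical one; the wrapper name is hypothetical. Both EPFs must exist and be unbound (no EPC assigned yet), or pci_epf_add_vepf() returns -EBUSY.

#include <linux/pci-epf.h>

/* Hypothetical association of a virtual EPF with a physical EPF */
static int sketch_attach_vf(struct pci_epf *pf, struct pci_epf *vf)
{
	int ret;

	ret = pci_epf_add_vepf(pf, vf);	/* allocates vf->vfunc_no >= 1 */
	if (ret)
		return ret;

	/*
	 * From here on, binding the physical function also binds the
	 * attached virtual functions (see pci_epf_bind() above). The
	 * association is undone with pci_epf_remove_vepf(pf, vf).
	 */
	return 0;
}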
+/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
@@ -317,6 +455,10 @@ struct pci_epf *pci_epf_create(const char *name)
return ERR_PTR(-ENOMEM);
}
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
dev = &epf->dev;
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index e01d53f5b32f..afa50b446567 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -23,6 +23,7 @@ struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
return to_pci_host_bridge(root_bus->bridge);
}
+EXPORT_SYMBOL_GPL(pci_find_host_bridge);
struct device *pci_get_host_bridge_device(struct pci_dev *dev)
{
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index a32070be5adf..cc6194aa24c1 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -40,9 +40,6 @@ ibmphp:
* The return value of pci_hp_register() is not checked.
-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
- and once more in the error path of its caller ibmphp_access_ebda().
-
* The various slot data structures are difficult to follow and need to be
simplified. A lot of functions are too large and too complex, they need
to be broken up into smaller, manageable pieces. Negative examples are
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 11a2661dc062..7fb75401ad8a 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
/* init hpc structure */
hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
if (!hpc_ptr) {
- rc = -ENOMEM;
- goto error_no_hpc;
+ return -ENOMEM;
}
hpc_ptr->ctlr_id = ctlr_id;
hpc_ptr->ctlr_relative_id = ctlr;
@@ -910,8 +909,6 @@ error:
kfree(tmp_slot);
error_no_slot:
free_ebda_hpc(hpc_ptr);
-error_no_hpc:
- iounmap(io_mem);
return rc;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index d4a930881054..69fd401691be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -184,7 +184,7 @@ void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe);
int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9d06939736c0..3024d7e85e6a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -870,7 +870,7 @@ void pcie_disable_interrupt(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 04565162a449..f4c2e6e01be0 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -526,7 +526,7 @@ scan:
return 0;
}
-static int pnv_php_reset_slot(struct hotplug_slot *slot, int probe)
+static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct pci_dev *bridge = php_slot->pdev;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a143b02b2dcd..d84381ce82b5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -310,7 +310,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
/* Check for ranges property */
err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto failed;
+ return 0;
dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 36bc23e21759..a1b1e2a01632 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/rwsem.h>
#include "pci.h"
/*
@@ -934,58 +935,77 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+void pci_set_acpi_fwnode(struct pci_dev *dev)
+{
+ if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ ACPI_COMPANION_SET(&dev->dev,
+ acpi_pci_find_companion(&dev->dev));
+}
+
+/**
+ * pci_dev_acpi_reset - do a function level reset using _RST method
+ * @dev: device to reset
+ * @probe: if true, return 0 if device supports _RST
+ */
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
+
+ if (!handle || !acpi_has_method(handle, "_RST"))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
+ pci_warn(dev, "ACPI _RST failed\n");
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static bool acpi_pci_power_manageable(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (!adev)
+ return false;
+ return acpi_device_power_manageable(adev);
+}
+
static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const struct fwnode_handle *fwnode;
+ const union acpi_object *obj;
struct acpi_device *adev;
- struct pci_dev *root;
- u8 val;
+ struct pci_dev *rpdev;
if (!dev->is_hotplug_bridge)
return false;
/* Assume D3 support if the bridge is power-manageable by ACPI. */
- adev = ACPI_COMPANION(&dev->dev);
- if (!adev && !pci_dev_is_added(dev)) {
- adev = acpi_pci_find_companion(&dev->dev);
- ACPI_COMPANION_SET(&dev->dev, adev);
- }
-
- if (adev && acpi_device_power_manageable(adev))
+ if (acpi_pci_power_manageable(dev))
return true;
/*
- * Look for a special _DSD property for the root port and if it
- * is set we know the hierarchy behind it supports D3 just fine.
+ * The ACPI firmware will provide the device-specific properties through
+ * the _DSD configuration object. Look for the 'HotPlugSupportInD3'
+ * property for the root port; if it is set, the hierarchy behind it
+ * supports D3 just fine.
*/
- root = pcie_find_root_port(dev);
- if (!root)
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev)
return false;
- adev = ACPI_COMPANION(&root->dev);
- if (root == dev) {
- /*
- * It is possible that the ACPI companion is not yet bound
- * for the root port so look it up manually here.
- */
- if (!adev && !pci_dev_is_added(root))
- adev = acpi_pci_find_companion(&root->dev);
- }
-
+ adev = ACPI_COMPANION(&rpdev->dev);
if (!adev)
return false;
- fwnode = acpi_fwnode_handle(adev);
- if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) < 0)
return false;
- return val == 1;
-}
-
-static bool acpi_pci_power_manageable(struct pci_dev *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- return adev ? acpi_device_power_manageable(adev) : false;
+ return obj->integer.value == 1;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
@@ -1159,6 +1179,69 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
}
/* ACPI bus type */
+
+
+static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
+static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
+
+/**
+ * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
+ * @func: ACPI companion lookup callback pointer or NULL.
+ *
+ * Set a special ACPI companion lookup callback for PCI devices whose companion
+ * objects in the ACPI namespace have _ADR with non-standard bus-device-function
+ * encodings.
+ *
+ * Return 0 on success or a negative error code on failure (in which case no
+ * changes are made).
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback installed by it.
+ */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
+{
+ int ret;
+
+ if (!func)
+ return -EINVAL;
+
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ if (pci_acpi_find_companion_hook) {
+ ret = -EBUSY;
+ } else {
+ pci_acpi_find_companion_hook = func;
+ ret = 0;
+ }
+
+ up_write(&pci_acpi_companion_lookup_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
+
+/**
+ * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
+ *
+ * Clear the special ACPI companion lookup callback previously set by
+ * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
+ * of the callback returns before clearing it.
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback cleared by it.
+ */
+void pci_acpi_clear_companion_lookup_hook(void)
+{
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ pci_acpi_find_companion_hook = NULL;
+
+ up_write(&pci_acpi_companion_lookup_sem);
+}
+EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
+
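A minimal sketch of how platform glue might use this hook pair; my_find_companion() and the init/exit wrappers are placeholders, and the fall-back-on-NULL behaviour follows from acpi_pci_find_companion() below.

#include <linux/pci.h>
#include <linux/pci-acpi.h>

/* Placeholder for bus-specific decoding of a non-standard _ADR */
static struct acpi_device *my_find_companion(struct pci_dev *pdev)
{
	/* Returning NULL falls back to the default _ADR lookup */
	return NULL;
}

static int my_glue_init(void)
{
	/* Fails with -EBUSY if another hook is already installed */
	return pci_acpi_set_companion_lookup_hook(my_find_companion);
}

static void my_glue_exit(void)
{
	/* Blocks until any running callback instance returns */
	pci_acpi_clear_companion_lookup_hook();
}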
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -1166,6 +1249,16 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
+ down_read(&pci_acpi_companion_lookup_sem);
+
+ adev = pci_acpi_find_companion_hook ?
+ pci_acpi_find_companion_hook(pci_dev) : NULL;
+
+ up_read(&pci_acpi_companion_lookup_sem);
+
+ if (adev)
+ return adev;
+
check_children = pci_is_bridge(pci_dev);
/* Please refer to the ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index b31883022a8e..49bbd37ee318 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -54,7 +54,7 @@ struct pci_bridge_emul_pcie_conf {
__le16 slotctl;
__le16 slotsta;
__le16 rootctl;
- __le16 rsvd;
+ __le16 rootcap;
__le32 rootsta;
__le32 devcap2;
__le16 devctl2;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a0615395500a..2761ab86490d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -136,7 +136,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
struct pci_dev *dev)
{
struct pci_dynid *dynid;
- const struct pci_device_id *found_id = NULL;
+ const struct pci_device_id *found_id = NULL, *ids;
/* When driver_override is set, only bind to the matching driver */
if (dev->driver_override && strcmp(dev->driver_override, drv->name))
@@ -152,14 +152,28 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
}
spin_unlock(&drv->dynids.lock);
- if (!found_id)
- found_id = pci_match_id(drv->id_table, dev);
+ if (found_id)
+ return found_id;
- /* driver_override will always match, send a dummy id */
- if (!found_id && dev->driver_override)
- found_id = &pci_device_id_any;
+ for (ids = drv->id_table; (found_id = pci_match_id(ids, dev));
+ ids = found_id + 1) {
+ /*
+ * The match table is split based on driver_override.
+ * In case override_only was set, enforce driver_override
+ * matching.
+ */
+ if (found_id->override_only) {
+ if (dev->driver_override)
+ return found_id;
+ } else {
+ return found_id;
+ }
+ }
- return found_id;
+ /* driver_override will always match, send a dummy id */
+ if (dev->driver_override)
+ return &pci_device_id_any;
+ return NULL;
}
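To illustrate the split match table this logic expects, a hedged sketch follows; the vendor/device IDs are placeholders. A non-zero override_only marks entries that only match once userspace has written this driver's name to the device's driver_override attribute.

#include <linux/pci.h>

static const struct pci_device_id sketch_ids[] = {
	{ PCI_DEVICE(0x1234, 0x0001) },			/* normal match */
	{ PCI_DEVICE(0x1234, 0x0002), .override_only = 1 }, /* override only */
	{ 0, }
};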
/**
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b70f61fbcd4b..7fb5cd17cc98 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1367,7 +1367,7 @@ static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->reset_fn)
+ if (!pci_reset_supported(pdev))
return 0;
return a->mode;
@@ -1491,6 +1491,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_config_attr_group,
&pci_dev_rom_attr_group,
&pci_dev_reset_attr_group,
+ &pci_dev_reset_method_attr_group,
&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
&pci_dev_smbios_attr_group,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a5e6759c407b..ce2ab62b64cf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
+#include <linux/bitfield.h>
#include "pci.h"
DEFINE_MUTEX(pci_slot_mutex);
@@ -72,6 +73,11 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
msleep(delay);
}
+bool pci_reset_supported(struct pci_dev *dev)
+{
+ return dev->reset_methods[0] != 0;
+}
+
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
@@ -206,32 +212,36 @@ int pci_status_get_and_clear_errors(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
#ifdef CONFIG_HAS_IOMEM
-void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
+ bool write_combine)
{
struct resource *res = &pdev->resource[bar];
+ resource_size_t start = res->start;
+ resource_size_t size = resource_size(res);
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
- pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
+ pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap(res->start, resource_size(res));
+
+ if (write_combine)
+ return ioremap_wc(start, size);
+
+ return ioremap(start, size);
+}
+
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
- /*
- * Make sure the BAR is actually a memory resource, not an IO resource
- */
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
- WARN_ON(1);
- return NULL;
- }
- return ioremap_wc(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
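A brief sketch of the consolidated helpers in a driver probe path; the function name and BAR numbers are illustrative assumptions.

#include <linux/io.h>
#include <linux/pci.h>

static int sketch_map_bars(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);	/* uncached */
	void __iomem *fb = pci_ioremap_wc_bar(pdev, 2);	/* write-combined */

	if (!regs || !fb) {
		if (regs)
			iounmap(regs);
		if (fb)
			iounmap(fb);
		return -ENOMEM;
	}
	/* ... use the mappings; iounmap() both on teardown ... */
	return 0;
}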
@@ -265,7 +275,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
*endptr = strchrnul(path, ';');
- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
if (!wpath)
return -ENOMEM;
@@ -915,8 +925,8 @@ static void pci_std_enable_acs(struct pci_dev *dev)
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
- /* Enable Translation Blocking for external devices */
- if (dev->external_facing || dev->untrusted)
+ /* Enable Translation Blocking for external devices and noats */
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
@@ -4629,31 +4639,11 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
/**
- * pcie_has_flr - check if a device supports function level resets
- * @dev: device to check
- *
- * Returns true if the device advertises support for PCIe function level
- * resets.
- */
-bool pcie_has_flr(struct pci_dev *dev)
-{
- u32 cap;
-
- if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
- return false;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- return cap & PCI_EXP_DEVCAP_FLR;
-}
-EXPORT_SYMBOL_GPL(pcie_has_flr);
-
-/**
* pcie_flr - initiate a PCIe function level reset
* @dev: device to reset
*
- * Initiate a function level reset on @dev. The caller should ensure the
- * device supports FLR before calling this function, e.g. by using the
- * pcie_has_flr() helper.
+ * Initiate a function level reset unconditionally on @dev, without first
+ * checking any flags or the DEVCAP FLR capability bit.
*/
int pcie_flr(struct pci_dev *dev)
{
@@ -4676,7 +4666,29 @@ int pcie_flr(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pcie_flr);
-static int pci_af_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_reset_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ * @probe: if true, return 0 if device can be reset this way
+ *
+ * Initiate a function level reset on @dev.
+ */
+int pcie_reset_flr(struct pci_dev *dev, bool probe)
+{
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return -ENOTTY;
+
+ if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ return pcie_flr(dev);
+}
+EXPORT_SYMBOL_GPL(pcie_reset_flr);
+
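For callers, a short sketch of the probe-then-reset pattern this helper enables; the wrapper name is hypothetical. PCI_RESET_PROBE (probe = true) only reports whether the method is supported, while PCI_RESET_DO_RESET performs it, matching the converted call sites later in this patch.

#include <linux/pci.h>

static int sketch_try_flr(struct pci_dev *pdev)
{
	/* Does this device advertise FLR (and no quirk forbids it)? */
	if (pcie_reset_flr(pdev, PCI_RESET_PROBE))
		return -ENOTTY;

	return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
}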
+static int pci_af_flr(struct pci_dev *dev, bool probe)
{
int pos;
u8 cap;
@@ -4723,7 +4735,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
/**
* pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
* @dev: Device to reset.
- * @probe: If set, only check if the device can be reset this way.
+ * @probe: if true, return 0 if the device can be reset this way.
*
* If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
* unset, it will be reinitialized internally when going from PCI_D3hot to
@@ -4735,7 +4747,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
* by default (i.e. unless the @dev's d3hot_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
-static int pci_pm_reset(struct pci_dev *dev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, bool probe)
{
u16 csr;
@@ -4995,7 +5007,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
{
struct pci_dev *pdev;
@@ -5013,7 +5025,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
return pci_bridge_secondary_bus_reset(dev->bus->self);
}
-static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
{
int rc = -ENOTTY;
@@ -5028,7 +5040,7 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
return rc;
}
-static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
{
if (dev->multifunction || dev->subordinate || !dev->slot ||
dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
@@ -5037,7 +5049,7 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
-static int pci_reset_bus_function(struct pci_dev *dev, int probe)
+static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
int rc;
@@ -5121,6 +5133,139 @@ static void pci_dev_restore(struct pci_dev *dev)
err_handler->reset_done(dev);
}
+/* dev->reset_methods[] is a 0-terminated list of indices into this array */
+static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+ { },
+ { pci_dev_specific_reset, .name = "device_specific" },
+ { pci_dev_acpi_reset, .name = "acpi" },
+ { pcie_reset_flr, .name = "flr" },
+ { pci_af_flr, .name = "af_flr" },
+ { pci_pm_reset, .name = "pm" },
+ { pci_reset_bus_function, .name = "bus" },
+};
+
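As a worked example of the encoding, assuming only the FLR and PM probes succeed for some device, pci_init_reset_methods() (further down) would leave:

	/* indices into pci_reset_fn_methods[]; 0 terminates the list */
	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 3 /* flr */, 5 /* pm */, 0 };

__pci_reset_function_locked() then tries index 3 first and falls back to index 5.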
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ options = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ while ((name = strsep(&options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ goto error;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ goto error;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ goto error;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ kfree(options);
+ return count;
+
+error:
+ /* Leave previous methods unchanged */
+ kfree(options);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_reset_supported(pdev))
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_method_attr_is_visible,
+};
+
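To show the attribute in use, a small userspace sketch follows (needs root; the BDF path is a placeholder). Writing a space-separated subset restricts and reorders the methods, and writing "default" re-probes the full set.

#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/bus/pci/devices/0000:01:00.0/reset_method";
	char methods[128] = "";
	FILE *f;

	/* Read the supported methods, in priority order */
	f = fopen(attr, "r");
	if (!f)
		return 1;
	if (!fgets(methods, sizeof(methods), f))
		methods[0] = '\0';
	fclose(f);
	printf("supported: %s", methods);

	/* Restrict the device to FLR only */
	f = fopen(attr, "w");
	if (!f)
		return 1;
	fputs("flr", f);
	fclose(f);

	/* Restore the probed default ordering */
	f = fopen(attr, "w");
	if (!f)
		return 1;
	fputs("default", f);
	fclose(f);
	return 0;
}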
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
@@ -5143,66 +5288,64 @@ static void pci_dev_restore(struct pci_dev *dev)
*/
int __pci_reset_function_locked(struct pci_dev *dev)
{
- int rc;
+ int i, m, rc = -ENOTTY;
might_sleep();
/*
- * A reset method returns -ENOTTY if it doesn't support this device
- * and we should try the next method.
+ * A reset method returns -ENOTTY if it doesn't support this device and
+ * we should try the next method.
*
- * If it returns 0 (success), we're finished. If it returns any
- * other error, we're also finished: this indicates that further
- * reset mechanisms might be broken on the device.
+ * If it returns 0 (success), we're finished. If it returns any other
+ * error, we're also finished: this indicates that further reset
+ * mechanisms might be broken on the device.
*/
- rc = pci_dev_specific_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = dev->reset_methods[i];
+ if (!m)
+ return -ENOTTY;
+
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ return 0;
if (rc != -ENOTTY)
return rc;
}
- rc = pci_af_flr(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- return pci_reset_bus_function(dev, 0);
+
+ return -ENOTTY;
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
/**
- * pci_probe_reset_function - check whether the device can be safely reset
- * @dev: PCI device to reset
+ * pci_init_reset_methods - check whether device can be safely reset and store supported reset mechanisms
+ * @dev: PCI device to check for reset mechanisms
*
* Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
+ * other functions in the same device. The PCI device must be in D0-D3hot
+ * state.
*
- * Returns 0 if the device function can be reset or negative if the
- * device doesn't support resetting a single function.
+ * Stores the reset mechanisms supported by the device in the reset_methods
+ * byte array, a member of struct pci_dev.
*/
-int pci_probe_reset_function(struct pci_dev *dev)
+void pci_init_reset_methods(struct pci_dev *dev)
{
- int rc;
+ int m, i, rc;
+
+ BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
might_sleep();
- rc = pci_dev_specific_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev))
- return 0;
- rc = pci_af_flr(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
+ i = 0;
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
+ if (!rc)
+ dev->reset_methods[i++] = m;
+ else if (rc != -ENOTTY)
+ break;
+ }
- return pci_reset_bus_function(dev, 1);
+ dev->reset_methods[i] = 0;
}
/**
@@ -5225,7 +5368,7 @@ int pci_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_lock(dev);
@@ -5261,7 +5404,7 @@ int pci_reset_function_locked(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_save_and_disable(dev);
@@ -5284,7 +5427,7 @@ int pci_try_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
if (!pci_dev_trylock(dev))
@@ -5512,7 +5655,7 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
}
}
-static int pci_slot_reset(struct pci_slot *slot, int probe)
+static int pci_slot_reset(struct pci_slot *slot, bool probe)
{
int rc;
@@ -5540,7 +5683,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
*/
int pci_probe_reset_slot(struct pci_slot *slot)
{
- return pci_slot_reset(slot, 1);
+ return pci_slot_reset(slot, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
@@ -5563,14 +5706,14 @@ static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
- rc = pci_slot_reset(slot, 1);
+ rc = pci_slot_reset(slot, PCI_RESET_PROBE);
if (rc)
return rc;
if (pci_slot_trylock(slot)) {
pci_slot_save_and_disable_locked(slot);
might_sleep();
- rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
pci_slot_restore_locked(slot);
pci_slot_unlock(slot);
} else
@@ -5579,7 +5722,7 @@ static int __pci_reset_slot(struct pci_slot *slot)
return rc;
}
-static int pci_bus_reset(struct pci_bus *bus, int probe)
+static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
int ret;
@@ -5625,14 +5768,14 @@ int pci_bus_error_reset(struct pci_dev *bridge)
goto bus_reset;
list_for_each_entry(slot, &bus->slots, list)
- if (pci_slot_reset(slot, 0))
+ if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
goto bus_reset;
mutex_unlock(&pci_slot_mutex);
return 0;
bus_reset:
mutex_unlock(&pci_slot_mutex);
- return pci_bus_reset(bridge->subordinate, 0);
+ return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}
/**
@@ -5643,7 +5786,7 @@ bus_reset:
*/
int pci_probe_reset_bus(struct pci_bus *bus)
{
- return pci_bus_reset(bus, 1);
+ return pci_bus_reset(bus, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
@@ -5657,7 +5800,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
- rc = pci_bus_reset(bus, 1);
+ rc = pci_bus_reset(bus, PCI_RESET_PROBE);
if (rc)
return rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2f52110cac97..1cce56c2aea0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -33,10 +33,32 @@ enum pci_mmap_api {
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
-int pci_probe_reset_function(struct pci_dev *dev);
+bool pci_reset_supported(struct pci_dev *dev);
+void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[];
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
+};
+
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+void pci_free_cap_save_buffers(struct pci_dev *dev);
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
+ u16 cap, unsigned int size);
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ u16 cap);
+
#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
@@ -100,8 +122,6 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
-void pci_allocate_cap_save_buffers(struct pci_dev *dev);
-void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
@@ -604,13 +624,18 @@ static inline void pci_ptm_init(struct pci_dev *dev) { }
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
- int (*reset)(struct pci_dev *dev, int probe);
+ int (*reset)(struct pci_dev *dev, bool probe);
+};
+
+struct pci_reset_fn_method {
+ int (*reset_fn)(struct pci_dev *pdev, bool probe);
+ char *name;
};
#ifdef CONFIG_PCI_QUIRKS
-int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
#else
-static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
}
@@ -698,7 +723,15 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL
#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
+void pci_set_acpi_fwnode(struct pci_dev *dev);
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe);
#else
+static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ return -ENOTTY;
+}
+
+static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {}
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
return -ENODEV;
@@ -709,4 +742,6 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
+extern const struct attribute_group pci_dev_reset_method_attr_group;
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index df4ba9b384c2..9784fdcf3006 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1407,13 +1407,11 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
}
if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
- pci_info(dev, "has been reset (%d)\n", rc);
- } else {
- pci_info(dev, "not reset (no FLR support)\n");
- rc = -ENOTTY;
- }
+ rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ pci_info(dev, "has been reset\n");
+ else
+ pci_info(dev, "not reset (no FLR support: %d)\n", rc);
} else {
rc = pci_bus_error_reset(dev);
pci_info(dev, "%s Port link has been reset (%d)\n",
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e1fed6649c41..3ee63968deaa 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -257,8 +257,13 @@ static int get_port_device_capability(struct pci_dev *dev)
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ u32 linkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
+ if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
+ }
return services;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 8a4ad974c5ac..368a254e3124 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -60,10 +60,8 @@ void pci_save_ptm_state(struct pci_dev *dev)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for PTM\n");
+ if (!save_state)
return;
- }
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 79177ac37880..d9fc02a71baa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -19,6 +19,7 @@
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -594,6 +595,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
+ bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
device_initialize(&bridge->dev);
}
@@ -828,11 +830,15 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
struct irq_domain *d;
+ /* If the host bridge driver sets an MSI domain for the bridge, use it */
+ d = dev_get_msi_domain(bus->bridge);
+
/*
* Any firmware interface that can resolve the msi_domain
* should be called from here.
*/
- d = pci_host_bridge_of_msi_domain(bus);
+ if (!d)
+ d = pci_host_bridge_of_msi_domain(bus);
if (!d)
d = pci_host_bridge_acpi_msi_domain(bus);
@@ -898,7 +904,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ else
+ bus->domain_nr = bridge->domain_nr;
#endif
b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
@@ -1498,8 +1507,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
- pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
+ pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
parent = pci_upstream_bridge(pdev);
if (!parent)
@@ -1809,6 +1818,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
+ pci_set_of_node(dev);
+ pci_set_acpi_fwnode(dev);
+
pci_dev_assign_slot(dev);
/*
@@ -1946,6 +1958,7 @@ int pci_setup_device(struct pci_dev *dev)
default: /* unknown header */
pci_err(dev, "unknown header type %02x, ignoring device\n",
dev->hdr_type);
+ pci_release_of_node(dev);
return -EIO;
bad:
@@ -2225,7 +2238,6 @@ static void pci_release_capabilities(struct pci_dev *dev)
{
pci_aer_exit(dev);
pci_rcec_exit(dev);
- pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
}
@@ -2374,10 +2386,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
- pci_set_of_node(dev);
-
if (pci_setup_device(dev)) {
- pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
@@ -2428,9 +2437,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_rcec_init(dev); /* Root Complex Event Collector */
pcie_report_downtraining(dev);
-
- if (pci_probe_reset_function(dev) == 0)
- dev->reset_fn = 1;
+ pci_init_reset_methods(dev);
}
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index d32fbfc93ea9..cb18f8a13ab6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -83,6 +83,7 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
buf += 4;
pos += 4;
cnt -= 4;
+ cond_resched();
}
if (cnt >= 2) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ab3de1551b50..e5089af8ad90 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1822,6 +1822,45 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
/*
+ * HiSilicon KunPeng920 and KunPeng930 have devices that appear as PCI but
+ * are actually on the AMBA bus. These fake PCI devices can support SVA via
+ * the SMMU stall feature, by setting dma-can-stall for ACPI platforms.
+ *
+ * Normally stalling must not be enabled for PCI devices, since it would
+ * break the PCI requirement for free-flowing writes and may lead to
+ * deadlock. We expect PCI devices to support ATS and PRI if they want to
+ * be fault-tolerant, so there's no ACPI binding to describe anything else,
+ * even when a "PCI" device turns out to be a regular old SoC device
+ * dressed up as a RCiEP and normal rules don't apply.
+ */
+static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
+{
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("dma-can-stall"),
+ {},
+ };
+
+ if (pdev->revision != 0x21 && pdev->revision != 0x30)
+ return;
+
+ pdev->pasid_no_tlp = 1;
+
+ /*
+ * Set the dma-can-stall property on ACPI platforms. Device tree
+ * can set it directly.
+ */
+ if (!pdev->dev.of_node &&
+ device_add_properties(&pdev->dev, properties))
+ pci_warn(pdev, "could not add stall property");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+
+/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
* together on certain PXH-based systems.
*/
@@ -3235,12 +3274,13 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
/*
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
@@ -3703,7 +3743,7 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
-static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
{
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
@@ -3725,7 +3765,7 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
-static int reset_ivb_igd(struct pci_dev *dev, int probe)
+static int reset_ivb_igd(struct pci_dev *dev, bool probe)
{
void __iomem *mmio_base;
unsigned long timeout;
@@ -3768,7 +3808,7 @@ reset_complete:
}
/* Device-specific reset method for Chelsio T4-based adapters */
-static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
{
u16 old_command;
u16 msix_flags;
@@ -3846,14 +3886,14 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
* Chapter 3: NVMe control registers
* Chapter 7.3: Reset behavior
*/
-static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
+static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
{
void __iomem *bar;
u16 cmd;
u32 cfg;
if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
- !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
+ pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
return -ENOTTY;
if (probe)
@@ -3920,15 +3960,12 @@ static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
* device too soon after FLR. A 250ms delay after FLR has heuristically
* proven to produce reliably working results for device assignment cases.
*/
-static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
{
- if (!pcie_has_flr(dev))
- return -ENOTTY;
-
if (probe)
- return 0;
+ return pcie_reset_flr(dev, PCI_RESET_PROBE);
- pcie_flr(dev);
+ pcie_reset_flr(dev, PCI_RESET_DO_RESET);
msleep(250);
@@ -3943,7 +3980,7 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
-static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe)
{
unsigned long timeout;
void __iomem *bar;
@@ -4020,7 +4057,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
* because when a host assigns a device to a guest VM, the host may need
* to reset the device but probably doesn't have a driver for it.
*/
-int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
const struct pci_dev_reset_methods *i;
@@ -4615,6 +4652,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+/*
+ * Each of these NXP Root Ports is in a Root Complex with a unique segment
+ * number and does provide isolation features to disable peer transactions
+ * and validate bus numbers in requests, but does not provide an ACS
+ * capability.
+ */
+static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
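
Like the other _acs quirks in this file, this reduces to a subset test: report success only when every ACS control the caller asks about is effectively provided. A standalone sketch of that check (the flag values are illustrative, and acs_ctrl_enabled() merely mirrors, rather than reproduces, the kernel's pci_acs_ctrl_enabled()):

#include <stdint.h>
#include <stdio.h>

/* Illustrative ACS control bits (placeholders, not the PCI_ACS_*
 * definitions from the kernel headers). */
#define ACS_SV 0x01	/* Source Validation */
#define ACS_RR 0x04	/* P2P Request Redirect */
#define ACS_CR 0x08	/* P2P Completion Redirect */
#define ACS_UF 0x10	/* Upstream Forwarding */

/* Return 1 if every requested bit is among the provided bits. */
static int acs_ctrl_enabled(uint16_t requested, uint16_t provided)
{
	return (requested & ~provided) == 0;
}

int main(void)
{
	uint16_t provided = ACS_SV | ACS_RR | ACS_CR | ACS_UF;

	printf("%d\n", acs_ctrl_enabled(ACS_SV | ACS_UF, provided)); /* 1 */
	printf("%d\n", acs_ctrl_enabled(0x8000, provided));          /* 0 */
	return 0;
}
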
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
{
if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
@@ -4841,6 +4890,10 @@ static const struct pci_dev_acs_enabled {
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
+ /* Cavium multi-function devices */
+ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
/* Ampere Computing */
@@ -4861,6 +4914,39 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* NXP root ports, xx=16, 12, or 08 cores */
+ /* LX2xx0A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
+ /* LX2xx0C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
+ /* LX2xx0E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
+ /* LX2xx0N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
+ /* LX2xx2A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
+ /* LX2xx2C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
@@ -5032,7 +5118,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
- if (dev->external_facing || dev->untrusted)
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
@@ -5630,7 +5716,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
pdev->subsystem_device != 0x222e ||
- !pdev->reset_fn)
+ !pci_reset_supported(pdev))
return;
if (pci_enable_device_mem(pdev))
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index dd12c2fcc7dc..4c54c75050dc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,7 +19,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
- dev->reset_fn = 0;
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 8b003c890b87..61a6fe3cde21 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -19,11 +19,12 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u8 byte;
u16 word;
u32 dword;
- long err;
- int cfg_ret;
+ int err, cfg_ret;
+ err = -EPERM;
+ dev = NULL;
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ goto error;
err = -ENODEV;
dev = pci_get_domain_bus_and_slot(0, bus, dfn);
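
The rewrite above pre-initializes err and dev before the first bailout so that a single error label (in the elided remainder of the function) can unwind both paths. The same shape in a standalone sketch with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_op(int allowed)
{
	int err = -EPERM;
	char *buf = NULL;	/* set up before the first goto ... */

	if (!allowed)
		goto error;	/* ... so the unwind can test it safely */

	err = -ENOMEM;
	buf = malloc(64);
	if (!buf)
		goto error;

	err = 0;		/* success path */
error:
	free(buf);		/* free(NULL) is a no-op */
	return err;
}

int main(void)
{
	printf("%d %d\n", do_op(0), do_op(1));
	return 0;
}
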
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 26bf7c877de5..25557b272a4f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -9,116 +9,94 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
+#include <asm/unaligned.h>
#include "pci.h"
-/* VPD access through PCI 2.2+ VPD capability */
+#define PCI_VPD_LRDT_TAG_SIZE 3
+#define PCI_VPD_SRDT_LEN_MASK 0x07
+#define PCI_VPD_SRDT_TAG_SIZE 1
+#define PCI_VPD_STIN_END 0x0f
+#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-struct pci_vpd_ops {
- ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
- ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-};
+static u16 pci_vpd_lrdt_size(const u8 *lrdt)
+{
+ return get_unaligned_le16(lrdt + 1);
+}
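
get_unaligned_le16() here reads the two length bytes following the tag byte without assuming alignment; byte-wise assembly gives the same result on any host endianness. A standalone equivalent:

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of an unaligned little-endian 16-bit load. */
static uint16_t load_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
	/* LRDT header: one tag byte, then a 16-bit little-endian length. */
	const uint8_t lrdt[3] = { 0x82, 0x34, 0x12 };

	printf("len = 0x%04x\n", load_le16(lrdt + 1)); /* len = 0x1234 */
	return 0;
}
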
-struct pci_vpd {
- const struct pci_vpd_ops *ops;
- struct mutex lock;
- unsigned int len;
- u16 flag;
- u8 cap;
- unsigned int busy:1;
- unsigned int valid:1;
-};
+static u8 pci_vpd_srdt_tag(const u8 *srdt)
+{
+ return *srdt >> 3;
+}
-static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
+static u8 pci_vpd_srdt_size(const u8 *srdt)
{
- return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ return *srdt & PCI_VPD_SRDT_LEN_MASK;
}
-/**
- * pci_read_vpd - Read one entry from Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to read
- * @buf: pointer to where to store result
- */
-ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+static u8 pci_vpd_info_field_size(const u8 *info_field)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->read(dev, pos, count, buf);
+ return info_field[2];
}
-EXPORT_SYMBOL(pci_read_vpd);
-/**
- * pci_write_vpd - Write entry to Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to write
- * @buf: buffer containing write data
- */
-ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+/* VPD access through PCI 2.2+ VPD capability */
+
+static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->write(dev, pos, count, buf);
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
-EXPORT_SYMBOL(pci_write_vpd);
-#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_SZ_INVALID UINT_MAX
/**
* pci_vpd_size - determine actual size of Vital Product Data
* @dev: pci device struct
- * @old_size: current assumed size, also maximum allowed size
*/
-static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
+static size_t pci_vpd_size(struct pci_dev *dev)
{
- size_t off = 0;
- unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
+ size_t off = 0, size;
+ unsigned char tag, header[1+2]; /* 1 byte tag, 2 bytes length */
- while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
- unsigned char tag;
+	/* Start from the maximum size, otherwise the reads below would fail. */
+ dev->vpd.len = PCI_VPD_MAX_SIZE;
- if (!header[0] && !off) {
- pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
- return 0;
- }
+ while (pci_read_vpd(dev, off, 1, header) == 1) {
+ size = 0;
+
+ if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
+ goto error;
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
- tag = pci_vpd_lrdt_tag(header);
- /* Only read length from known tag items */
- if ((tag == PCI_VPD_LTIN_ID_STRING) ||
- (tag == PCI_VPD_LTIN_RO_DATA) ||
- (tag == PCI_VPD_LTIN_RW_DATA)) {
- if (pci_read_vpd(dev, off+1, 2,
- &header[1]) != 2) {
- pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
- tag, off + 1);
- return 0;
- }
- off += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(header);
+ if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
+ pci_warn(dev, "failed VPD read at offset %zu\n",
+ off + 1);
+ return off ?: PCI_VPD_SZ_INVALID;
}
+ size = pci_vpd_lrdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
+
+ off += PCI_VPD_LRDT_TAG_SIZE + size;
} else {
/* Short Resource Data Type Tag */
- off += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(header);
tag = pci_vpd_srdt_tag(header);
- }
-
- if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
- return off;
+ size = pci_vpd_srdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
- if ((tag != PCI_VPD_LTIN_ID_STRING) &&
- (tag != PCI_VPD_LTIN_RO_DATA) &&
- (tag != PCI_VPD_LTIN_RW_DATA)) {
- pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
- (header[0] & PCI_VPD_LRDT) ? "large" : "short",
- tag, off);
- return 0;
+ off += PCI_VPD_SRDT_TAG_SIZE + size;
+ if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
+ return off;
}
}
- return 0;
+ return off;
+
+error:
+ pci_info(dev, "invalid VPD tag %#04x (size %zu) at offset %zu%s\n",
+ header[0], size, off, off == 0 ?
+ "; assume missing optional EEPROM" : "");
+ return off ?: PCI_VPD_SZ_INVALID;
}
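
In the stream walked above, a large-resource header is one tag byte (bit 7 set) plus a little-endian 16-bit length, while a short-resource header packs the tag into bits 7:3 and the length into bits 2:0; the end tag is short tag 0x0f. A standalone sketch of the same walk over an in-memory buffer (no device reads, no sanity limits):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t vpd_size(const uint8_t *vpd, size_t max)
{
	size_t off = 0;

	while (off < max) {
		uint8_t hdr = vpd[off];

		if (hdr & 0x80) {			/* large resource */
			uint16_t len = vpd[off + 1] | (vpd[off + 2] << 8);
			off += 3 + len;			/* tag + 2 len bytes */
		} else {				/* short resource */
			off += 1 + (hdr & 0x07);	/* tag + 3-bit len */
			if ((hdr >> 3) == 0x0f)		/* end tag */
				return off;
		}
	}
	return 0;					/* no end tag found */
}

int main(void)
{
	/* ID string "PCI" (large tag 0x82, len 3), then end tag 0x78. */
	const uint8_t vpd[] = { 0x82, 0x03, 0x00, 'P', 'C', 'I', 0x78 };

	printf("size = %zu\n", vpd_size(vpd, sizeof(vpd))); /* size = 7 */
	return 0;
}
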
/*
@@ -126,33 +104,26 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
+ * @set: if true, wait for the flag to be set; otherwise wait for it to be cleared
*
* Returns 0 on success, negative values indicate error.
*/
-static int pci_vpd_wait(struct pci_dev *dev)
+static int pci_vpd_wait(struct pci_dev *dev, bool set)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
unsigned long timeout = jiffies + msecs_to_jiffies(125);
unsigned long max_sleep = 16;
u16 status;
int ret;
- if (!vpd->busy)
- return 0;
-
do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
return ret;
- if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
- vpd->busy = 0;
+ if (!!(status & PCI_VPD_ADDR_F) == set)
return 0;
- }
-
- if (fatal_signal_pending(current))
- return -EINTR;
if (time_after(jiffies, timeout))
break;
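
The loop polls the VPD address flag with a sleep that doubles between reads, bounded by the 125ms deadline set above; the backoff step itself sits in the part of the loop the hunk elides, so the cap and step in this standalone sketch are assumptions:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
}

static bool always_false(void) { return false; }

/* Poll a condition with a doubling sleep, bounded by a deadline. */
static bool poll_flag(bool (*done)(void), long timeout_us)
{
	long deadline = now_us() + timeout_us;
	long sleep = 16;

	for (;;) {
		if (done())
			return true;
		if (now_us() > deadline)
			return false;
		usleep(sleep);
		if (sleep < 1024)
			sleep *= 2;	/* back off, capped at ~1ms */
	}
}

int main(void)
{
	printf("%d\n", poll_flag(always_false, 125000)); /* 0: timed out */
	return 0;
}
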
@@ -169,22 +140,17 @@ static int pci_vpd_wait(struct pci_dev *dev)
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
- int ret;
+ struct pci_vpd *vpd = &dev->vpd;
+ int ret = 0;
loff_t end = pos + count;
u8 *buf = arg;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0)
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (pos > vpd->len)
return 0;
@@ -196,21 +162,20 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
u32 val;
unsigned int i, skip;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
pos & ~3);
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = PCI_VPD_ADDR_F;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, true);
if (ret < 0)
break;
@@ -228,7 +193,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
val >>= 8;
}
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
@@ -236,41 +201,26 @@ out:
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0 || (pos & 3) || (count & 3))
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (end > vpd->len)
return -EINVAL;
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
- u32 val;
-
- val = *buf++;
- val |= *buf++ << 8;
- val |= *buf++ << 16;
- val |= *buf++ << 24;
-
- ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
+ get_unaligned_le32(buf));
if (ret < 0)
break;
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
@@ -278,85 +228,28 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = 0;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, false);
if (ret < 0)
break;
+ buf += sizeof(u32);
pos += sizeof(u32);
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
-static const struct pci_vpd_ops pci_vpd_ops = {
- .read = pci_vpd_read,
- .write = pci_vpd_write,
-};
-
-static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_read_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_write_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static const struct pci_vpd_ops pci_vpd_f0_ops = {
- .read = pci_vpd_f0_read,
- .write = pci_vpd_f0_write,
-};
-
void pci_vpd_init(struct pci_dev *dev)
{
- struct pci_vpd *vpd;
- u8 cap;
+ dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ mutex_init(&dev->vpd.lock);
- cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
- if (!cap)
- return;
-
- vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
- if (!vpd)
- return;
-
- vpd->len = PCI_VPD_MAX_SIZE;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
- vpd->ops = &pci_vpd_f0_ops;
- else
- vpd->ops = &pci_vpd_ops;
- mutex_init(&vpd->lock);
- vpd->cap = cap;
- vpd->busy = 0;
- vpd->valid = 0;
- dev->vpd = vpd;
-}
+ if (!dev->vpd.len)
+ dev->vpd.len = pci_vpd_size(dev);
-void pci_vpd_release(struct pci_dev *dev)
-{
- kfree(dev->vpd);
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -388,7 +281,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->vpd)
+ if (!pdev->vpd.cap)
return 0;
return a->attr.mode;
@@ -399,23 +292,63 @@ const struct attribute_group pci_dev_vpd_attr_group = {
.is_bin_visible = vpd_attr_is_visible,
};
-int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
+{
+ unsigned int len = dev->vpd.len;
+ void *buf;
+ int cnt;
+
+ if (!dev->vpd.cap)
+ return ERR_PTR(-ENODEV);
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cnt = pci_read_vpd(dev, 0, len, buf);
+ if (cnt != len) {
+ kfree(buf);
+ return ERR_PTR(-EIO);
+ }
+
+ if (size)
+ *size = len;
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_alloc);
+
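
Together with pci_vpd_find_ro_info_keyword() added later in this patch, the new allocation helper gives drivers a two-call pattern for VPD lookups. A hedged kernel-idiom sketch of a caller (demo_read_serial() is invented for illustration and not compilable on its own; "SN" is the read-only serial-number keyword):

/* Hedged usage sketch: read the whole VPD image, then look up one
 * read-only keyword in it. */
static void demo_read_serial(struct pci_dev *pdev)
{
	unsigned int vpd_len, kw_len;
	void *vpd;
	int pos;

	vpd = pci_vpd_alloc(pdev, &vpd_len);
	if (IS_ERR(vpd))
		return;

	pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len, "SN", &kw_len);
	if (pos >= 0)
		pci_info(pdev, "serial: %.*s\n", (int)kw_len,
			 (char *)vpd + pos);

	kfree(vpd);
}
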
+static int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt, unsigned int *size)
{
int i = 0;
/* look for LRDT tags only, end tag is the only SRDT tag */
while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
- if (buf[i] == rdt)
+ unsigned int lrdt_len = pci_vpd_lrdt_size(buf + i);
+ u8 tag = buf[i];
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (tag == rdt) {
+ if (i + lrdt_len > len)
+ lrdt_len = len - i;
+ if (size)
+ *size = lrdt_len;
return i;
+ }
- i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
+ i += lrdt_len;
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size)
+{
+ return pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_ID_STRING, size);
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_id_string);
+
+static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
unsigned int len, const char *kw)
{
int i;
@@ -431,7 +364,106 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_read(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_write(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size)
+{
+ int ro_start, infokw_start;
+ unsigned int ro_len, infokw_size;
+
+ ro_start = pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_RO_DATA, &ro_len);
+ if (ro_start < 0)
+ return ro_start;
+
+ infokw_start = pci_vpd_find_info_keyword(buf, ro_start, ro_len, kw);
+ if (infokw_start < 0)
+ return infokw_start;
+
+ infokw_size = pci_vpd_info_field_size(buf + infokw_start);
+ infokw_start += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (infokw_start + infokw_size > len)
+ return -EINVAL;
+
+ if (size)
+ *size = infokw_size;
+
+ return infokw_start;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_ro_info_keyword);
+
+int pci_vpd_check_csum(const void *buf, unsigned int len)
+{
+ const u8 *vpd = buf;
+ unsigned int size;
+ u8 csum = 0;
+ int rv_start;
+
+ rv_start = pci_vpd_find_ro_info_keyword(buf, len, PCI_VPD_RO_KEYWORD_CHKSUM, &size);
+ if (rv_start == -ENOENT) /* no checksum in VPD */
+ return 1;
+ else if (rv_start < 0)
+ return rv_start;
+
+ if (!size)
+ return -EINVAL;
+
+ while (rv_start >= 0)
+ csum += vpd[rv_start--];
+
+ return csum ? -EILSEQ : 0;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_check_csum);
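
The rule being checked: every byte from offset 0 through the first byte of the RV field must sum to zero modulo 256, which is why the loop above accumulates vpd[rv_start] down to vpd[0]. A standalone demonstration with a made-up byte string:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vpd[8] = { 0x82, 0x01, 0x00, 'X', 0x90, 0x02, 0x00, 0 };
	size_t i, rv = 7;	/* offset of the checksum byte itself */
	uint8_t sum = 0;

	for (i = 0; i < rv; i++)
		sum += vpd[i];
	vpd[rv] = (uint8_t)(0x100 - sum);	/* pick byte so total == 0 */

	sum = 0;
	for (i = 0; i <= rv; i++)
		sum += vpd[i];
	printf("checksum %s\n", sum == 0 ? "ok" : "bad");	/* ok */
	return 0;
}
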
#ifdef CONFIG_PCI_QUIRKS
/*
@@ -450,7 +482,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev)
if (!f0)
return;
- if (f0->vpd && dev->class == f0->class &&
+ if (f0->vpd.cap && dev->class == f0->class &&
dev->vendor == f0->vendor && dev->device == f0->device)
dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
@@ -468,41 +500,27 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
- if (dev->vpd) {
- dev->vpd->len = 0;
- pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
- }
+ dev->vpd.len = PCI_VPD_SZ_INVALID;
+ pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
*/
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
- PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
-
-static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
- return;
-
- vpd->valid = 1;
- vpd->len = len;
-}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
@@ -522,12 +540,12 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
* limits.
*/
if (chip == 0x0 && prod >= 0x20)
- pci_vpd_set_size(dev, 8192);
+ dev->vpd.len = 8192;
else if (chip >= 0x4 && func < 0x8)
- pci_vpd_set_size(dev, 2048);
+ dev->vpd.len = 2048;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
- quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_extend_vpd);
#endif
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index b7a8f3a1921f..2156c632524d 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -115,7 +115,7 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
struct xen_pci_op *active_op = &pdev->sh_info->op;
unsigned long irq_flags;
evtchn_port_t port = pdev->evtchn;
- unsigned irq = pdev->irq;
+ unsigned int irq = pdev->irq;
s64 ns, ns_timeout;
spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
@@ -153,10 +153,10 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
}
/*
- * We might lose backend service request since we
- * reuse same evtchn with pci_conf backend response. So re-schedule
- * aer pcifront service.
- */
+	 * We might lose the backend service request since we reuse the
+	 * same evtchn with the pci_conf backend response, so reschedule
+	 * the AER pcifront service.
+ */
if (test_bit(_XEN_PCIB_active,
(unsigned long *)&pdev->sh_info->flags)) {
dev_err(&pdev->xdev->dev,
@@ -414,7 +414,8 @@ static int pcifront_scan_bus(struct pcifront_device *pdev,
struct pci_dev *d;
unsigned int devfn;
- /* Scan the bus for functions and add.
+ /*
+	 * Scan the bus for functions and add them.
* We omit handling of PCI bridge attachment because pciback prevents
* bridges from being exported.
*/
@@ -492,8 +493,10 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
list_add(&bus_entry->list, &pdev->root_buses);
- /* pci_scan_root_bus skips devices which do not have a
- * devfn==0. The pcifront_scan_bus enumerates all devfn. */
+ /*
+	 * pci_scan_root_bus() skips devices that do not have
+	 * devfn == 0; pcifront_scan_bus() enumerates all devfns.
+ */
err = pcifront_scan_bus(pdev, domain, bus, b);
/* Claim resources before going "live" with our devices */
@@ -651,8 +654,10 @@ static void pcifront_do_aer(struct work_struct *data)
pci_channel_state_t state =
(pci_channel_state_t)pdev->sh_info->aer_op.err;
- /*If a pci_conf op is in progress,
- we have to wait until it is done before service aer op*/
+ /*
+	 * If a pci_conf op is in progress, we have to wait until it is done
+	 * before servicing the AER op.
+ */
dev_dbg(&pdev->xdev->dev,
"pcifront service aer bus %x devfn %x\n",
pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);
@@ -676,6 +681,7 @@ static void pcifront_do_aer(struct work_struct *data)
static irqreturn_t pcifront_handler_aer(int irq, void *dev)
{
struct pcifront_device *pdev = dev;
+
schedule_pcifront_aer_op(pdev);
return IRQ_HANDLED;
}
@@ -693,7 +699,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !is_swiotlb_active()) {
+ if (!err && !is_swiotlb_active(&pdev->xdev->dev)) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
@@ -1027,6 +1033,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
/* Find devices being detached and remove them. */
for (i = 0; i < num_devs; i++) {
int l, state;
+
l = snprintf(str, sizeof(str), "state-%d", i);
if (unlikely(l >= (sizeof(str) - 1))) {
err = -ENOMEM;
@@ -1078,7 +1085,7 @@ out:
return err;
}
-static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
+static void pcifront_backend_changed(struct xenbus_device *xdev,
enum xenbus_state be_state)
{
struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
@@ -1137,6 +1144,7 @@ out:
static int pcifront_xenbus_remove(struct xenbus_device *xdev)
{
struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
+
if (pdev)
free_pdev(pdev);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 7dd35f1b9cc5..82b63e60c5a2 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -37,7 +37,7 @@ config PHY_LPC18XX_USB_OTG
config PHY_PISTACHIO_USB
tristate "IMG Pistachio USB2.0 PHY driver"
- depends on MACH_PISTACHIO
+ depends on MIPS || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support the USB2.0 PHY on the IMG Pistachio SoC.
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 3e491dfb2525..937a14fa7448 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <linux/units.h>
#define STM32_USBPHYC_PLL 0x0
#define STM32_USBPHYC_MISC 0x8
@@ -47,7 +48,6 @@
#define PLL_FVCO_MHZ 2880
#define PLL_INFF_MIN_RATE_HZ 19200000
#define PLL_INFF_MAX_RATE_HZ 38400000
-#define HZ_PER_MHZ 1000000L
struct pll_params {
u8 ndiv;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index f38f12801f18..31921108e456 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -248,12 +248,15 @@ config PINCTRL_SX150X
- 16 bits: sx1509q, sx1506q
config PINCTRL_PISTACHIO
- def_bool y if MACH_PISTACHIO
+ bool "IMG Pistachio SoC pinctrl driver"
+ depends on OF && (MIPS || COMPILE_TEST)
depends on GPIOLIB
select PINMUX
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
select OF_GPIO
+ help
+	  This supports the pinctrl and GPIO driver for the IMG Pistachio SoC.
config PINCTRL_ST
bool
@@ -404,6 +407,25 @@ config PINCTRL_K210
Add support for the Canaan Kendryte K210 RISC-V SOC Field
Programmable IO Array (FPIOA) controller.
+config PINCTRL_KEEMBAY
+ tristate "Pinctrl driver for Intel Keem Bay SoC"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on HAS_IOMEM
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
+ help
+	  This selects the pin control driver for the Intel Keem Bay SoC.
+	  It provides pin config functions such as pull-up, pull-down,
+	  interrupt, drive strength, sec lock, Schmitt trigger, slew
+	  rate control and direction control. This module will be
+	  called pinctrl-keembay.
+
source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 5ef5334a797f..200073bcc2c1 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
+obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9bbfe5c14b36..c94e24aadf92 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -133,8 +133,8 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
}
/**
- * Search for the signal expression needed to enable the pin's signal for the
- * requested function.
+ * aspeed_find_expr_by_name - Search for the signal expression needed to
+ * enable the pin's signal for the requested function.
*
* @exprs: List of signal expressions (haystack)
* @name: The name of the requested function (needle)
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 894e2efd3be7..4aa46383c2c5 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -59,7 +59,8 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
}
/**
- * Query the enabled or disabled state for a mux function's signal on a pin
+ * aspeed_sig_expr_eval - Query the enabled or disabled state for a
+ * mux function's signal on a pin
*
* @ctx: The driver context for the pinctrl IP
* @expr: An expression controlling the signal for a mux function on a pin
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index b69ba6b360a2..4d7548686f39 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -5,7 +5,6 @@
#define ASPEED_PINMUX_H
#include <linux/regmap.h>
-#include <stdbool.h>
/*
* The ASPEED SoCs provide typically more than 200 pins for GPIO and other
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 8b34d2c308c7..6e6fefeb21ea 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -416,8 +416,7 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
}
}
/* This should not happen, every IRQ has a bank */
- if (i == BCM2835_NUM_IRQS)
- BUG();
+ BUG_ON(i == BCM2835_NUM_IRQS);
chained_irq_enter(host_chip, desc);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index f294336430cc..21fa21c6547b 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -166,6 +166,13 @@ config PINCTRL_IMX8DXL
help
Say Y here to enable the imx8dxl pinctrl driver
+config PINCTRL_IMX8ULP
+ tristate "IMX8ULP pinctrl driver"
+ depends on ARCH_MXC
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8ulp pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index e476cb671037..c44930b1b362 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
obj-$(CONFIG_PINCTRL_IMX8DXL) += pinctrl-imx8dxl.o
+obj-$(CONFIG_PINCTRL_IMX8ULP) += pinctrl-imx8ulp.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
index 041455c13d0d..f947b1d0d1aa 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -155,7 +155,7 @@ static const struct pinctrl_pin_desc imx8dxl_pinctrl_pads[] = {
};
-static struct imx_pinctrl_soc_info imx8dxl_pinctrl_info = {
+static const struct imx_pinctrl_soc_info imx8dxl_pinctrl_info = {
.pins = imx8dxl_pinctrl_pads,
.npins = ARRAY_SIZE(imx8dxl_pinctrl_pads),
.flags = IMX_USE_SCU,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mn.c b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
index 448a79eb4568..dbf89cfba477 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mn.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
@@ -317,7 +317,7 @@ static const struct pinctrl_pin_desc imx8mn_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX8MN_IOMUXC_UART4_TXD),
};
-static struct imx_pinctrl_soc_info imx8mn_pinctrl_info = {
+static const struct imx_pinctrl_soc_info imx8mn_pinctrl_info = {
.pins = imx8mn_pinctrl_pads,
.npins = ARRAY_SIZE(imx8mn_pinctrl_pads),
.gpr_compatible = "fsl,imx8mn-iomuxc-gpr",
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
index 4f97813ba8b7..0a0acc0038d0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
@@ -194,7 +194,7 @@ static const struct pinctrl_pin_desc imx8qxp_pinctrl_pads[] = {
IMX_PINCTRL_PIN(IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0B),
};
-static struct imx_pinctrl_soc_info imx8qxp_pinctrl_info = {
+static const struct imx_pinctrl_soc_info imx8qxp_pinctrl_info = {
.pins = imx8qxp_pinctrl_pads,
.npins = ARRAY_SIZE(imx8qxp_pinctrl_pads),
.flags = IMX_USE_SCU,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8ulp.c b/drivers/pinctrl/freescale/pinctrl-imx8ulp.c
new file mode 100644
index 000000000000..f8572597a54e
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8ulp.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8ulp_pads {
+ IMX8ULP_PAD_PTD0 = 0,
+ IMX8ULP_PAD_PTD1,
+ IMX8ULP_PAD_PTD2,
+ IMX8ULP_PAD_PTD3,
+ IMX8ULP_PAD_PTD4,
+ IMX8ULP_PAD_PTD5,
+ IMX8ULP_PAD_PTD6,
+ IMX8ULP_PAD_PTD7,
+ IMX8ULP_PAD_PTD8,
+ IMX8ULP_PAD_PTD9,
+ IMX8ULP_PAD_PTD10,
+ IMX8ULP_PAD_PTD11,
+ IMX8ULP_PAD_PTD12,
+ IMX8ULP_PAD_PTD13,
+ IMX8ULP_PAD_PTD14,
+ IMX8ULP_PAD_PTD15,
+ IMX8ULP_PAD_PTD16,
+ IMX8ULP_PAD_PTD17,
+ IMX8ULP_PAD_PTD18,
+ IMX8ULP_PAD_PTD19,
+ IMX8ULP_PAD_PTD20,
+ IMX8ULP_PAD_PTD21,
+ IMX8ULP_PAD_PTD22,
+ IMX8ULP_PAD_PTD23,
+ IMX8ULP_PAD_RESERVE0,
+ IMX8ULP_PAD_RESERVE1,
+ IMX8ULP_PAD_RESERVE2,
+ IMX8ULP_PAD_RESERVE3,
+ IMX8ULP_PAD_RESERVE4,
+ IMX8ULP_PAD_RESERVE5,
+ IMX8ULP_PAD_RESERVE6,
+ IMX8ULP_PAD_RESERVE7,
+ IMX8ULP_PAD_PTE0,
+ IMX8ULP_PAD_PTE1,
+ IMX8ULP_PAD_PTE2,
+ IMX8ULP_PAD_PTE3,
+ IMX8ULP_PAD_PTE4,
+ IMX8ULP_PAD_PTE5,
+ IMX8ULP_PAD_PTE6,
+ IMX8ULP_PAD_PTE7,
+ IMX8ULP_PAD_PTE8,
+ IMX8ULP_PAD_PTE9,
+ IMX8ULP_PAD_PTE10,
+ IMX8ULP_PAD_PTE11,
+ IMX8ULP_PAD_PTE12,
+ IMX8ULP_PAD_PTE13,
+ IMX8ULP_PAD_PTE14,
+ IMX8ULP_PAD_PTE15,
+ IMX8ULP_PAD_PTE16,
+ IMX8ULP_PAD_PTE17,
+ IMX8ULP_PAD_PTE18,
+ IMX8ULP_PAD_PTE19,
+ IMX8ULP_PAD_PTE20,
+ IMX8ULP_PAD_PTE21,
+ IMX8ULP_PAD_PTE22,
+ IMX8ULP_PAD_PTE23,
+ IMX8ULP_PAD_RESERVE8,
+ IMX8ULP_PAD_RESERVE9,
+ IMX8ULP_PAD_RESERVE10,
+ IMX8ULP_PAD_RESERVE11,
+ IMX8ULP_PAD_RESERVE12,
+ IMX8ULP_PAD_RESERVE13,
+ IMX8ULP_PAD_RESERVE14,
+ IMX8ULP_PAD_RESERVE15,
+ IMX8ULP_PAD_PTF0,
+ IMX8ULP_PAD_PTF1,
+ IMX8ULP_PAD_PTF2,
+ IMX8ULP_PAD_PTF3,
+ IMX8ULP_PAD_PTF4,
+ IMX8ULP_PAD_PTF5,
+ IMX8ULP_PAD_PTF6,
+ IMX8ULP_PAD_PTF7,
+ IMX8ULP_PAD_PTF8,
+ IMX8ULP_PAD_PTF9,
+ IMX8ULP_PAD_PTF10,
+ IMX8ULP_PAD_PTF11,
+ IMX8ULP_PAD_PTF12,
+ IMX8ULP_PAD_PTF13,
+ IMX8ULP_PAD_PTF14,
+ IMX8ULP_PAD_PTF15,
+ IMX8ULP_PAD_PTF16,
+ IMX8ULP_PAD_PTF17,
+ IMX8ULP_PAD_PTF18,
+ IMX8ULP_PAD_PTF19,
+ IMX8ULP_PAD_PTF20,
+ IMX8ULP_PAD_PTF21,
+ IMX8ULP_PAD_PTF22,
+ IMX8ULP_PAD_PTF23,
+ IMX8ULP_PAD_PTF24,
+ IMX8ULP_PAD_PTF25,
+ IMX8ULP_PAD_PTF26,
+ IMX8ULP_PAD_PTF27,
+ IMX8ULP_PAD_PTF28,
+ IMX8ULP_PAD_PTF29,
+ IMX8ULP_PAD_PTF30,
+ IMX8ULP_PAD_PTF31,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8ulp_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD0),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD1),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD2),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD3),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD4),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD5),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD6),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD7),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD8),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD9),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD10),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD11),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD12),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD13),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD14),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD15),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD16),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD17),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD18),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD19),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD20),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD21),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD22),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTD23),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE0),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE1),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE2),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE3),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE4),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE5),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE6),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE7),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE8),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE9),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE10),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE11),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE12),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE13),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE14),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE15),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE16),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE17),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE18),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE19),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE20),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE21),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE22),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTE23),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF0),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF1),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF2),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF3),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF4),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF5),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF6),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF7),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF8),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF9),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF10),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF11),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF12),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF13),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF14),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF15),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF16),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF17),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF18),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF19),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF20),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF21),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF22),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF23),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF24),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF25),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF26),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF27),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF28),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF29),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF30),
+ IMX_PINCTRL_PIN(IMX8ULP_PAD_PTF31),
+};
+
+#define BM_OBE_ENABLED BIT(17)
+#define BM_IBE_ENABLED BIT(16)
+#define BM_MUX_MODE 0xf00
+#define BP_MUX_MODE 8
+
+static int imx8ulp_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx_pin_reg *pin_reg;
+ u32 reg;
+
+ pin_reg = &ipctl->pin_regs[offset];
+ if (pin_reg->mux_reg == -1)
+ return -EINVAL;
+
+ reg = readl(ipctl->base + pin_reg->mux_reg);
+ if (input)
+ reg = (reg & ~BM_OBE_ENABLED) | BM_IBE_ENABLED;
+ else
+ reg = (reg & ~BM_IBE_ENABLED) | BM_OBE_ENABLED;
+ writel(reg, ipctl->base + pin_reg->mux_reg);
+
+ return 0;
+}
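
The direction helper above is a plain read-modify-write on the shared mux register: input enables IBE and clears OBE, output does the reverse. The same logic on an ordinary variable, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OBE (1u << 17)	/* output buffer enable, as in the driver */
#define IBE (1u << 16)	/* input buffer enable */

static uint32_t set_direction(uint32_t reg, bool input)
{
	if (input)
		return (reg & ~OBE) | IBE;
	return (reg & ~IBE) | OBE;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_direction(reg, true);
	printf("input:  OBE=%d IBE=%d\n", !!(reg & OBE), !!(reg & IBE));
	reg = set_direction(reg, false);
	printf("output: OBE=%d IBE=%d\n", !!(reg & OBE), !!(reg & IBE));
	return 0;
}
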
+
+static const struct imx_pinctrl_soc_info imx8ulp_pinctrl_info = {
+ .pins = imx8ulp_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8ulp_pinctrl_pads),
+ .flags = ZERO_OFFSET_VALID | SHARE_MUX_CONF_REG,
+ .gpio_set_direction = imx8ulp_pmx_gpio_set_direction,
+ .mux_mask = BM_MUX_MODE,
+ .mux_shift = BP_MUX_MODE,
+};
+
+static const struct of_device_id imx8ulp_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8ulp-iomuxc1", },
+ { /* sentinel */ }
+};
+
+static int imx8ulp_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8ulp_pinctrl_info);
+}
+
+static struct platform_driver imx8ulp_pinctrl_driver = {
+ .driver = {
+ .name = "imx8ulp-pinctrl",
+ .of_match_table = imx8ulp_pinctrl_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8ulp_pinctrl_probe,
+};
+
+static int __init imx8ulp_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8ulp_pinctrl_driver);
+}
+arch_initcall(imx8ulp_pinctrl_init);
+
+MODULE_AUTHOR("Jacky Bai <ping.bai@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8ULP pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index 22c33c3cb581..79b1fee5a1eb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -485,7 +485,6 @@ static struct platform_driver mtk_pinctrl_driver = {
.probe = mtk_pinctrl_probe,
.driver = {
.name = "mediatek-mt8365-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = mt8365_pctrl_match,
.pm = &mtk_eint_pm_ops,
},
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5a68e242f6b3..5cb018f98800 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -167,10 +167,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
+ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
+ "pwm", "led"),
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
@@ -184,10 +188,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
};
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index ce9cc719c395..2712f51eb238 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -104,6 +104,7 @@ enum jz_version {
ID_X1500,
ID_X1830,
ID_X2000,
+ ID_X2100,
};
struct ingenic_chip_info {
@@ -589,6 +590,18 @@ static int jz4755_uart0_data_pins[] = { 0x7c, 0x7d, };
static int jz4755_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
static int jz4755_uart1_data_pins[] = { 0x97, 0x99, };
static int jz4755_uart2_data_pins[] = { 0x9f, };
+static int jz4755_ssi_dt_b_pins[] = { 0x3b, };
+static int jz4755_ssi_dt_f_pins[] = { 0xa1, };
+static int jz4755_ssi_dr_b_pins[] = { 0x3c, };
+static int jz4755_ssi_dr_f_pins[] = { 0xa2, };
+static int jz4755_ssi_clk_b_pins[] = { 0x3a, };
+static int jz4755_ssi_clk_f_pins[] = { 0xa0, };
+static int jz4755_ssi_gpc_b_pins[] = { 0x3e, };
+static int jz4755_ssi_gpc_f_pins[] = { 0xa4, };
+static int jz4755_ssi_ce0_b_pins[] = { 0x3d, };
+static int jz4755_ssi_ce0_f_pins[] = { 0xa3, };
+static int jz4755_ssi_ce1_b_pins[] = { 0x3f, };
+static int jz4755_ssi_ce1_f_pins[] = { 0xa5, };
static int jz4755_mmc0_1bit_pins[] = { 0x2f, 0x50, 0x5c, };
static int jz4755_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x51, };
static int jz4755_mmc1_1bit_pins[] = { 0x3a, 0x3d, 0x3c, };
@@ -630,6 +643,18 @@ static const struct group_desc jz4755_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", jz4755_uart0_hwflow, 0),
INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 0),
INGENIC_PIN_GROUP("uart2-data", jz4755_uart2_data, 1),
+ INGENIC_PIN_GROUP("ssi-dt-b", jz4755_ssi_dt_b, 0),
+ INGENIC_PIN_GROUP("ssi-dt-f", jz4755_ssi_dt_f, 0),
+ INGENIC_PIN_GROUP("ssi-dr-b", jz4755_ssi_dr_b, 0),
+ INGENIC_PIN_GROUP("ssi-dr-f", jz4755_ssi_dr_f, 0),
+ INGENIC_PIN_GROUP("ssi-clk-b", jz4755_ssi_clk_b, 0),
+ INGENIC_PIN_GROUP("ssi-clk-f", jz4755_ssi_clk_f, 0),
+ INGENIC_PIN_GROUP("ssi-gpc-b", jz4755_ssi_gpc_b, 0),
+ INGENIC_PIN_GROUP("ssi-gpc-f", jz4755_ssi_gpc_f, 0),
+ INGENIC_PIN_GROUP("ssi-ce0-b", jz4755_ssi_ce0_b, 0),
+ INGENIC_PIN_GROUP("ssi-ce0-f", jz4755_ssi_ce0_f, 0),
+ INGENIC_PIN_GROUP("ssi-ce1-b", jz4755_ssi_ce1_b, 0),
+ INGENIC_PIN_GROUP("ssi-ce1-f", jz4755_ssi_ce1_f, 0),
INGENIC_PIN_GROUP_FUNCS("mmc0-1bit", jz4755_mmc0_1bit,
jz4755_mmc0_1bit_funcs),
INGENIC_PIN_GROUP_FUNCS("mmc0-4bit", jz4755_mmc0_4bit,
@@ -661,6 +686,14 @@ static const struct group_desc jz4755_groups[] = {
static const char *jz4755_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4755_uart1_groups[] = { "uart1-data", };
static const char *jz4755_uart2_groups[] = { "uart2-data", };
+static const char *jz4755_ssi_groups[] = {
+ "ssi-dt-b", "ssi-dt-f",
+ "ssi-dr-b", "ssi-dr-f",
+ "ssi-clk-b", "ssi-clk-f",
+ "ssi-gpc-b", "ssi-gpc-f",
+ "ssi-ce0-b", "ssi-ce0-f",
+ "ssi-ce1-b", "ssi-ce1-f",
+};
static const char *jz4755_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
static const char *jz4755_mmc1_groups[] = { "mmc0-1bit", "mmc0-4bit", };
static const char *jz4755_i2c_groups[] = { "i2c-data", };
@@ -683,6 +716,7 @@ static const struct function_desc jz4755_functions[] = {
{ "uart0", jz4755_uart0_groups, ARRAY_SIZE(jz4755_uart0_groups), },
{ "uart1", jz4755_uart1_groups, ARRAY_SIZE(jz4755_uart1_groups), },
{ "uart2", jz4755_uart2_groups, ARRAY_SIZE(jz4755_uart2_groups), },
+ { "ssi", jz4755_ssi_groups, ARRAY_SIZE(jz4755_ssi_groups), },
{ "mmc0", jz4755_mmc0_groups, ARRAY_SIZE(jz4755_mmc0_groups), },
{ "mmc1", jz4755_mmc1_groups, ARRAY_SIZE(jz4755_mmc1_groups), },
{ "i2c", jz4755_i2c_groups, ARRAY_SIZE(jz4755_i2c_groups), },
@@ -710,7 +744,7 @@ static const struct ingenic_chip_info jz4755_chip_info = {
};
static const u32 jz4760_pull_ups[6] = {
- 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
+ 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0x0000000f,
};
static const u32 jz4760_pull_downs[6] = {
@@ -725,6 +759,58 @@ static int jz4760_uart2_data_pins[] = { 0x5c, 0x5e, };
static int jz4760_uart2_hwflow_pins[] = { 0x5d, 0x5f, };
static int jz4760_uart3_data_pins[] = { 0x6c, 0x85, };
static int jz4760_uart3_hwflow_pins[] = { 0x88, 0x89, };
+static int jz4760_ssi0_dt_a_pins[] = { 0x15, };
+static int jz4760_ssi0_dt_b_pins[] = { 0x35, };
+static int jz4760_ssi0_dt_d_pins[] = { 0x75, };
+static int jz4760_ssi0_dt_e_pins[] = { 0x91, };
+static int jz4760_ssi0_dr_a_pins[] = { 0x14, };
+static int jz4760_ssi0_dr_b_pins[] = { 0x34, };
+static int jz4760_ssi0_dr_d_pins[] = { 0x74, };
+static int jz4760_ssi0_dr_e_pins[] = { 0x8e, };
+static int jz4760_ssi0_clk_a_pins[] = { 0x12, };
+static int jz4760_ssi0_clk_b_pins[] = { 0x3c, };
+static int jz4760_ssi0_clk_d_pins[] = { 0x78, };
+static int jz4760_ssi0_clk_e_pins[] = { 0x8f, };
+static int jz4760_ssi0_gpc_b_pins[] = { 0x3e, };
+static int jz4760_ssi0_gpc_d_pins[] = { 0x76, };
+static int jz4760_ssi0_gpc_e_pins[] = { 0x93, };
+static int jz4760_ssi0_ce0_a_pins[] = { 0x13, };
+static int jz4760_ssi0_ce0_b_pins[] = { 0x3d, };
+static int jz4760_ssi0_ce0_d_pins[] = { 0x79, };
+static int jz4760_ssi0_ce0_e_pins[] = { 0x90, };
+static int jz4760_ssi0_ce1_b_pins[] = { 0x3f, };
+static int jz4760_ssi0_ce1_d_pins[] = { 0x77, };
+static int jz4760_ssi0_ce1_e_pins[] = { 0x92, };
+static int jz4760_ssi1_dt_b_9_pins[] = { 0x29, };
+static int jz4760_ssi1_dt_b_21_pins[] = { 0x35, };
+static int jz4760_ssi1_dt_d_12_pins[] = { 0x6c, };
+static int jz4760_ssi1_dt_d_21_pins[] = { 0x75, };
+static int jz4760_ssi1_dt_e_pins[] = { 0x91, };
+static int jz4760_ssi1_dt_f_pins[] = { 0xa3, };
+static int jz4760_ssi1_dr_b_6_pins[] = { 0x26, };
+static int jz4760_ssi1_dr_b_20_pins[] = { 0x34, };
+static int jz4760_ssi1_dr_d_13_pins[] = { 0x6d, };
+static int jz4760_ssi1_dr_d_20_pins[] = { 0x74, };
+static int jz4760_ssi1_dr_e_pins[] = { 0x8e, };
+static int jz4760_ssi1_dr_f_pins[] = { 0xa0, };
+static int jz4760_ssi1_clk_b_7_pins[] = { 0x27, };
+static int jz4760_ssi1_clk_b_28_pins[] = { 0x3c, };
+static int jz4760_ssi1_clk_d_pins[] = { 0x78, };
+static int jz4760_ssi1_clk_e_7_pins[] = { 0x87, };
+static int jz4760_ssi1_clk_e_15_pins[] = { 0x8f, };
+static int jz4760_ssi1_clk_f_pins[] = { 0xa2, };
+static int jz4760_ssi1_gpc_b_pins[] = { 0x3e, };
+static int jz4760_ssi1_gpc_d_pins[] = { 0x76, };
+static int jz4760_ssi1_gpc_e_pins[] = { 0x93, };
+static int jz4760_ssi1_ce0_b_8_pins[] = { 0x28, };
+static int jz4760_ssi1_ce0_b_29_pins[] = { 0x3d, };
+static int jz4760_ssi1_ce0_d_pins[] = { 0x79, };
+static int jz4760_ssi1_ce0_e_6_pins[] = { 0x86, };
+static int jz4760_ssi1_ce0_e_16_pins[] = { 0x90, };
+static int jz4760_ssi1_ce0_f_pins[] = { 0xa1, };
+static int jz4760_ssi1_ce1_b_pins[] = { 0x3f, };
+static int jz4760_ssi1_ce1_d_pins[] = { 0x77, };
+static int jz4760_ssi1_ce1_e_pins[] = { 0x92, };
static int jz4760_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
static int jz4760_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
static int jz4760_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
@@ -801,6 +887,58 @@ static const struct group_desc jz4760_groups[] = {
INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4760_uart3_data,
jz4760_uart3_data_funcs),
INGENIC_PIN_GROUP("uart3-hwflow", jz4760_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP("ssi0-dt-a", jz4760_ssi0_dt_a, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-b", jz4760_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", jz4760_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-e", jz4760_ssi0_dt_e, 0),
+ INGENIC_PIN_GROUP("ssi0-dr-a", jz4760_ssi0_dr_a, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-b", jz4760_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", jz4760_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-e", jz4760_ssi0_dr_e, 0),
+ INGENIC_PIN_GROUP("ssi0-clk-a", jz4760_ssi0_clk_a, 2),
+ INGENIC_PIN_GROUP("ssi0-clk-b", jz4760_ssi0_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", jz4760_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-e", jz4760_ssi0_clk_e, 0),
+ INGENIC_PIN_GROUP("ssi0-gpc-b", jz4760_ssi0_gpc_b, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-d", jz4760_ssi0_gpc_d, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-e", jz4760_ssi0_gpc_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce0-a", jz4760_ssi0_ce0_a, 2),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", jz4760_ssi0_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", jz4760_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-e", jz4760_ssi0_ce0_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce1-b", jz4760_ssi0_ce1_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-d", jz4760_ssi0_ce1_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-e", jz4760_ssi0_ce1_e, 0),
+ INGENIC_PIN_GROUP("ssi1-dt-b-9", jz4760_ssi1_dt_b_9, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-b-21", jz4760_ssi1_dt_b_21, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d-12", jz4760_ssi1_dt_d_12, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d-21", jz4760_ssi1_dt_d_21, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", jz4760_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dt-f", jz4760_ssi1_dt_f, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-b-6", jz4760_ssi1_dr_b_6, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-b-20", jz4760_ssi1_dr_b_20, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d-13", jz4760_ssi1_dr_d_13, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d-20", jz4760_ssi1_dr_d_20, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", jz4760_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-f", jz4760_ssi1_dr_f, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-b-7", jz4760_ssi1_clk_b_7, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-b-28", jz4760_ssi1_clk_b_28, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", jz4760_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e-7", jz4760_ssi1_clk_e_7, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e-15", jz4760_ssi1_clk_e_15, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-f", jz4760_ssi1_clk_f, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-b", jz4760_ssi1_gpc_b, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", jz4760_ssi1_gpc_d, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-e", jz4760_ssi1_gpc_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-b-8", jz4760_ssi1_ce0_b_8, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-b-29", jz4760_ssi1_ce0_b_29, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", jz4760_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e-6", jz4760_ssi1_ce0_e_6, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e-16", jz4760_ssi1_ce0_e_16, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-f", jz4760_ssi1_ce0_f, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-b", jz4760_ssi1_ce1_b, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", jz4760_ssi1_ce1_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-e", jz4760_ssi1_ce1_e, 1),
INGENIC_PIN_GROUP_FUNCS("mmc0-1bit-a", jz4760_mmc0_1bit_a,
jz4760_mmc0_1bit_a_funcs),
INGENIC_PIN_GROUP("mmc0-4bit-a", jz4760_mmc0_4bit_a, 1),
@@ -854,6 +992,22 @@ static const char *jz4760_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4760_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
static const char *jz4760_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
static const char *jz4760_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4760_ssi0_groups[] = {
+ "ssi0-dt-a", "ssi0-dt-b", "ssi0-dt-d", "ssi0-dt-e",
+ "ssi0-dr-a", "ssi0-dr-b", "ssi0-dr-d", "ssi0-dr-e",
+ "ssi0-clk-a", "ssi0-clk-b", "ssi0-clk-d", "ssi0-clk-e",
+ "ssi0-gpc-b", "ssi0-gpc-d", "ssi0-gpc-e",
+ "ssi0-ce0-a", "ssi0-ce0-b", "ssi0-ce0-d", "ssi0-ce0-e",
+ "ssi0-ce1-b", "ssi0-ce1-d", "ssi0-ce1-e",
+};
+static const char *jz4760_ssi1_groups[] = {
+ "ssi1-dt-b-9", "ssi1-dt-b-21", "ssi1-dt-d-12", "ssi1-dt-d-21", "ssi1-dt-e", "ssi1-dt-f",
+ "ssi1-dr-b-6", "ssi1-dr-b-20", "ssi1-dr-d-13", "ssi1-dr-d-20", "ssi1-dr-e", "ssi1-dr-f",
+ "ssi1-clk-b-7", "ssi1-clk-b-28", "ssi1-clk-d", "ssi1-clk-e-7", "ssi1-clk-e-15", "ssi1-clk-f",
+ "ssi1-gpc-b", "ssi1-gpc-d", "ssi1-gpc-e",
+ "ssi1-ce0-b-8", "ssi1-ce0-b-29", "ssi1-ce0-d", "ssi1-ce0-e-6", "ssi1-ce0-e-16", "ssi1-ce0-f",
+ "ssi1-ce1-b", "ssi1-ce1-d", "ssi1-ce1-e",
+};
static const char *jz4760_mmc0_groups[] = {
"mmc0-1bit-a", "mmc0-4bit-a",
"mmc0-1bit-e", "mmc0-4bit-e", "mmc0-8bit-e",
@@ -898,6 +1052,8 @@ static const struct function_desc jz4760_functions[] = {
{ "uart1", jz4760_uart1_groups, ARRAY_SIZE(jz4760_uart1_groups), },
{ "uart2", jz4760_uart2_groups, ARRAY_SIZE(jz4760_uart2_groups), },
{ "uart3", jz4760_uart3_groups, ARRAY_SIZE(jz4760_uart3_groups), },
+ { "ssi0", jz4760_ssi0_groups, ARRAY_SIZE(jz4760_ssi0_groups), },
+ { "ssi1", jz4760_ssi1_groups, ARRAY_SIZE(jz4760_ssi1_groups), },
{ "mmc0", jz4760_mmc0_groups, ARRAY_SIZE(jz4760_mmc0_groups), },
{ "mmc1", jz4760_mmc1_groups, ARRAY_SIZE(jz4760_mmc1_groups), },
{ "mmc2", jz4760_mmc2_groups, ARRAY_SIZE(jz4760_mmc2_groups), },
@@ -936,11 +1092,11 @@ static const struct ingenic_chip_info jz4760_chip_info = {
};
static const u32 jz4770_pull_ups[6] = {
- 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
+ 0x3fffffff, 0xfff0f3fc, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0x0024f00f,
};
static const u32 jz4770_pull_downs[6] = {
- 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
+ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x005b0ff0,
};
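
For orientation, these per-port words are bitmasks with one bit per pin, marking which pins carry a pull-up or pull-down resistor; the corrected words above flip the bits that were wrong for ports B and F. A minimal sketch of how such a mask is queried, with an illustrative helper name that is not part of the driver:

	#include <linux/bits.h>

	/* Hypothetical helper: does the given pin of the given GPIO port have
	 * a pull-up? Bit n of pull_ups[port] set means pin n is pulled up. */
	static bool jz4770_pin_has_pull_up(unsigned int port, unsigned int pin)
	{
		return jz4770_pull_ups[port] & BIT(pin);
	}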
static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
@@ -1827,7 +1983,9 @@ static int x1000_uart1_data_d_pins[] = { 0x62, 0x63, };
static int x1000_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1000_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1000_uart2_data_d_pins[] = { 0x65, 0x64, };
-static int x1000_sfc_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, 0x1a, 0x1b, };
+static int x1000_sfc_data_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, };
+static int x1000_sfc_clk_pins[] = { 0x1a, };
+static int x1000_sfc_ce_pins[] = { 0x1b, };
static int x1000_ssi_dt_a_22_pins[] = { 0x16, };
static int x1000_ssi_dt_a_29_pins[] = { 0x1d, };
static int x1000_ssi_dt_d_pins[] = { 0x62, };
@@ -1871,8 +2029,8 @@ static int x1000_i2s_data_tx_pins[] = { 0x24, };
static int x1000_i2s_data_rx_pins[] = { 0x23, };
static int x1000_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1000_i2s_sysclk_pins[] = { 0x20, };
-static int x1000_dmic0_pins[] = { 0x35, 0x36, };
-static int x1000_dmic1_pins[] = { 0x25, };
+static int x1000_dmic_if0_pins[] = { 0x35, 0x36, };
+static int x1000_dmic_if1_pins[] = { 0x25, };
static int x1000_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1901,7 +2059,9 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow, 1),
INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a, 2),
INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d, 0),
- INGENIC_PIN_GROUP("sfc", x1000_sfc, 1),
+ INGENIC_PIN_GROUP("sfc-data", x1000_sfc_data, 1),
+ INGENIC_PIN_GROUP("sfc-clk", x1000_sfc_clk, 1),
+ INGENIC_PIN_GROUP("sfc-ce", x1000_sfc_ce, 1),
INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22, 2),
INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29, 2),
INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d, 0),
@@ -1938,8 +2098,8 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk, 1),
- INGENIC_PIN_GROUP("dmic0", x1000_dmic0, 0),
- INGENIC_PIN_GROUP("dmic1", x1000_dmic1, 1),
+ INGENIC_PIN_GROUP("dmic-if0", x1000_dmic_if0, 0),
+ INGENIC_PIN_GROUP("dmic-if1", x1000_dmic_if1, 1),
INGENIC_PIN_GROUP("cim-data", x1000_cim, 2),
INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit, 1),
INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit, 1),
@@ -1956,7 +2116,7 @@ static const char *x1000_uart1_groups[] = {
"uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1000_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
-static const char *x1000_sfc_groups[] = { "sfc", };
+static const char *x1000_sfc_groups[] = { "sfc-data", "sfc-clk", "sfc-ce", };
static const char *x1000_ssi_groups[] = {
"ssi-dt-a-22", "ssi-dt-a-29", "ssi-dt-d",
"ssi-dr-a-23", "ssi-dr-a-28", "ssi-dr-d",
@@ -1983,7 +2143,7 @@ static const char *x1000_i2c2_groups[] = { "i2c2-data", };
static const char *x1000_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
-static const char *x1000_dmic_groups[] = { "dmic0", "dmic1", };
+static const char *x1000_dmic_groups[] = { "dmic-if0", "dmic-if1", };
static const char *x1000_cim_groups[] = { "cim-data", };
static const char *x1000_lcd_groups[] = { "lcd-8bit", "lcd-16bit", };
static const char *x1000_pwm0_groups[] = { "pwm0", };
@@ -2048,8 +2208,8 @@ static int x1500_i2s_data_tx_pins[] = { 0x24, };
static int x1500_i2s_data_rx_pins[] = { 0x23, };
static int x1500_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1500_i2s_sysclk_pins[] = { 0x20, };
-static int x1500_dmic0_pins[] = { 0x35, 0x36, };
-static int x1500_dmic1_pins[] = { 0x25, };
+static int x1500_dmic_if0_pins[] = { 0x35, 0x36, };
+static int x1500_dmic_if1_pins[] = { 0x25, };
static int x1500_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -2068,7 +2228,9 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow, 1),
INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a, 2),
INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d, 0),
- INGENIC_PIN_GROUP("sfc", x1000_sfc, 1),
+ INGENIC_PIN_GROUP("sfc-data", x1000_sfc_data, 1),
+ INGENIC_PIN_GROUP("sfc-clk", x1000_sfc_clk, 1),
+ INGENIC_PIN_GROUP("sfc-ce", x1000_sfc_ce, 1),
INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit, 1),
INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit, 1),
INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0, 0),
@@ -2079,8 +2241,8 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk, 1),
- INGENIC_PIN_GROUP("dmic0", x1500_dmic0, 0),
- INGENIC_PIN_GROUP("dmic1", x1500_dmic1, 1),
+ INGENIC_PIN_GROUP("dmic-if0", x1500_dmic_if0, 0),
+ INGENIC_PIN_GROUP("dmic-if1", x1500_dmic_if1, 1),
INGENIC_PIN_GROUP("cim-data", x1500_cim, 2),
INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", x1500_pwm_pwm1, 1),
@@ -2101,7 +2263,7 @@ static const char *x1500_i2c2_groups[] = { "i2c2-data", };
static const char *x1500_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
-static const char *x1500_dmic_groups[] = { "dmic0", "dmic1", };
+static const char *x1500_dmic_groups[] = { "dmic-if0", "dmic-if1", };
static const char *x1500_cim_groups[] = { "cim-data", };
static const char *x1500_pwm0_groups[] = { "pwm0", };
static const char *x1500_pwm1_groups[] = { "pwm1", };
@@ -2151,7 +2313,9 @@ static const u32 x1830_pull_downs[4] = {
static int x1830_uart0_data_pins[] = { 0x33, 0x36, };
static int x1830_uart0_hwflow_pins[] = { 0x34, 0x35, };
static int x1830_uart1_data_pins[] = { 0x38, 0x37, };
-static int x1830_sfc_pins[] = { 0x17, 0x18, 0x1a, 0x19, 0x1b, 0x1c, };
+static int x1830_sfc_data_pins[] = { 0x17, 0x18, 0x1a, 0x19, };
+static int x1830_sfc_clk_pins[] = { 0x1b, };
+static int x1830_sfc_ce_pins[] = { 0x1c, };
static int x1830_ssi0_dt_pins[] = { 0x4c, };
static int x1830_ssi0_dr_pins[] = { 0x4b, };
static int x1830_ssi0_clk_pins[] = { 0x4f, };
@@ -2182,8 +2346,8 @@ static int x1830_i2s_data_rx_pins[] = { 0x54, };
static int x1830_i2s_clk_txrx_pins[] = { 0x58, 0x52, };
static int x1830_i2s_clk_rx_pins[] = { 0x56, 0x55, };
static int x1830_i2s_sysclk_pins[] = { 0x57, };
-static int x1830_dmic0_pins[] = { 0x48, 0x59, };
-static int x1830_dmic1_pins[] = { 0x5a, };
+static int x1830_dmic_if0_pins[] = { 0x48, 0x59, };
+static int x1830_dmic_if1_pins[] = { 0x5a, };
static int x1830_lcd_tft_8bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x73, 0x72, 0x69,
@@ -2223,7 +2387,9 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data, 0),
INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow, 0),
INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data, 0),
- INGENIC_PIN_GROUP("sfc", x1830_sfc, 1),
+ INGENIC_PIN_GROUP("sfc-data", x1830_sfc_data, 1),
+ INGENIC_PIN_GROUP("sfc-clk", x1830_sfc_clk, 1),
+ INGENIC_PIN_GROUP("sfc-ce", x1830_sfc_ce, 1),
INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt, 0),
INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr, 0),
INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk, 0),
@@ -2254,8 +2420,8 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx, 0),
INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx, 0),
INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk, 0),
- INGENIC_PIN_GROUP("dmic0", x1830_dmic0, 2),
- INGENIC_PIN_GROUP("dmic1", x1830_dmic1, 2),
+ INGENIC_PIN_GROUP("dmic-if0", x1830_dmic_if0, 2),
+ INGENIC_PIN_GROUP("dmic-if1", x1830_dmic_if1, 2),
INGENIC_PIN_GROUP("lcd-tft-8bit", x1830_lcd_tft_8bit, 0),
INGENIC_PIN_GROUP("lcd-tft-24bit", x1830_lcd_tft_24bit, 0),
INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit, 1),
@@ -2281,7 +2447,7 @@ static const struct group_desc x1830_groups[] = {
static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1830_uart1_groups[] = { "uart1-data", };
-static const char *x1830_sfc_groups[] = { "sfc", };
+static const char *x1830_sfc_groups[] = { "sfc-data", "sfc-clk", "sfc-ce", };
static const char *x1830_ssi0_groups[] = {
"ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-gpc", "ssi0-ce0", "ssi0-ce1",
};
@@ -2301,7 +2467,7 @@ static const char *x1830_i2c2_groups[] = { "i2c2-data", };
static const char *x1830_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
};
-static const char *x1830_dmic_groups[] = { "dmic0", "dmic1", };
+static const char *x1830_dmic_groups[] = { "dmic-if0", "dmic-if1", };
static const char *x1830_lcd_groups[] = {
"lcd-tft-8bit", "lcd-tft-24bit", "lcd-slcd-8bit", "lcd-slcd-16bit",
};
@@ -2381,17 +2547,21 @@ static int x2000_uart7_data_a_pins[] = { 0x08, 0x09, };
static int x2000_uart7_data_c_pins[] = { 0x41, 0x42, };
static int x2000_uart8_data_pins[] = { 0x3c, 0x3d, };
static int x2000_uart9_data_pins[] = { 0x3e, 0x3f, };
-static int x2000_sfc0_d_pins[] = { 0x73, 0x74, 0x75, 0x76, 0x71, 0x72, };
-static int x2000_sfc0_e_pins[] = { 0x92, 0x93, 0x94, 0x95, 0x90, 0x91, };
-static int x2000_sfc1_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_sfc_data_if0_d_pins[] = { 0x73, 0x74, 0x75, 0x76, };
+static int x2000_sfc_data_if0_e_pins[] = { 0x92, 0x93, 0x94, 0x95, };
+static int x2000_sfc_data_if1_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_sfc_clk_d_pins[] = { 0x71, };
+static int x2000_sfc_clk_e_pins[] = { 0x90, };
+static int x2000_sfc_ce_d_pins[] = { 0x72, };
+static int x2000_sfc_ce_e_pins[] = { 0x91, };
static int x2000_ssi0_dt_b_pins[] = { 0x3e, };
static int x2000_ssi0_dt_d_pins[] = { 0x69, };
static int x2000_ssi0_dr_b_pins[] = { 0x3d, };
static int x2000_ssi0_dr_d_pins[] = { 0x6a, };
static int x2000_ssi0_clk_b_pins[] = { 0x3f, };
static int x2000_ssi0_clk_d_pins[] = { 0x68, };
-static int x2000_ssi0_ce0_b_pins[] = { 0x3c, };
-static int x2000_ssi0_ce0_d_pins[] = { 0x6d, };
+static int x2000_ssi0_ce_b_pins[] = { 0x3c, };
+static int x2000_ssi0_ce_d_pins[] = { 0x6d, };
static int x2000_ssi1_dt_c_pins[] = { 0x4b, };
static int x2000_ssi1_dt_d_pins[] = { 0x72, };
static int x2000_ssi1_dt_e_pins[] = { 0x91, };
@@ -2401,9 +2571,9 @@ static int x2000_ssi1_dr_e_pins[] = { 0x92, };
static int x2000_ssi1_clk_c_pins[] = { 0x4c, };
static int x2000_ssi1_clk_d_pins[] = { 0x71, };
static int x2000_ssi1_clk_e_pins[] = { 0x90, };
-static int x2000_ssi1_ce0_c_pins[] = { 0x49, };
-static int x2000_ssi1_ce0_d_pins[] = { 0x76, };
-static int x2000_ssi1_ce0_e_pins[] = { 0x95, };
+static int x2000_ssi1_ce_c_pins[] = { 0x49, };
+static int x2000_ssi1_ce_d_pins[] = { 0x76, };
+static int x2000_ssi1_ce_e_pins[] = { 0x95, };
static int x2000_mmc0_1bit_pins[] = { 0x71, 0x72, 0x73, };
static int x2000_mmc0_4bit_pins[] = { 0x74, 0x75, 0x76, };
static int x2000_mmc0_8bit_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
@@ -2455,10 +2625,10 @@ static int x2000_i2s3_data_tx2_pins[] = { 0x05, };
static int x2000_i2s3_data_tx3_pins[] = { 0x06, };
static int x2000_i2s3_clk_tx_pins[] = { 0x10, 0x02, };
static int x2000_i2s3_sysclk_tx_pins[] = { 0x00, };
-static int x2000_dmic0_pins[] = { 0x54, 0x55, };
-static int x2000_dmic1_pins[] = { 0x56, };
-static int x2000_dmic2_pins[] = { 0x57, };
-static int x2000_dmic3_pins[] = { 0x58, };
+static int x2000_dmic_if0_pins[] = { 0x54, 0x55, };
+static int x2000_dmic_if1_pins[] = { 0x56, };
+static int x2000_dmic_if2_pins[] = { 0x57, };
+static int x2000_dmic_if3_pins[] = { 0x58, };
static int x2000_cim_8bit_pins[] = {
0x0e, 0x0c, 0x0d, 0x4f,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
@@ -2545,17 +2715,21 @@ static const struct group_desc x2000_groups[] = {
INGENIC_PIN_GROUP("uart7-data-c", x2000_uart7_data_c, 3),
INGENIC_PIN_GROUP("uart8-data", x2000_uart8_data, 3),
INGENIC_PIN_GROUP("uart9-data", x2000_uart9_data, 3),
- INGENIC_PIN_GROUP("sfc0-d", x2000_sfc0_d, 1),
- INGENIC_PIN_GROUP("sfc0-e", x2000_sfc0_e, 0),
- INGENIC_PIN_GROUP("sfc1", x2000_sfc1, 1),
+ INGENIC_PIN_GROUP("sfc-data-if0-d", x2000_sfc_data_if0_d, 1),
+ INGENIC_PIN_GROUP("sfc-data-if0-e", x2000_sfc_data_if0_e, 0),
+ INGENIC_PIN_GROUP("sfc-data-if1", x2000_sfc_data_if1, 1),
+ INGENIC_PIN_GROUP("sfc-clk-d", x2000_sfc_clk_d, 1),
+ INGENIC_PIN_GROUP("sfc-clk-e", x2000_sfc_clk_e, 0),
+ INGENIC_PIN_GROUP("sfc-ce-d", x2000_sfc_ce_d, 1),
+ INGENIC_PIN_GROUP("sfc-ce-e", x2000_sfc_ce_e, 0),
INGENIC_PIN_GROUP("ssi0-dt-b", x2000_ssi0_dt_b, 1),
INGENIC_PIN_GROUP("ssi0-dt-d", x2000_ssi0_dt_d, 1),
INGENIC_PIN_GROUP("ssi0-dr-b", x2000_ssi0_dr_b, 1),
INGENIC_PIN_GROUP("ssi0-dr-d", x2000_ssi0_dr_d, 1),
INGENIC_PIN_GROUP("ssi0-clk-b", x2000_ssi0_clk_b, 1),
INGENIC_PIN_GROUP("ssi0-clk-d", x2000_ssi0_clk_d, 1),
- INGENIC_PIN_GROUP("ssi0-ce0-b", x2000_ssi0_ce0_b, 1),
- INGENIC_PIN_GROUP("ssi0-ce0-d", x2000_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce-b", x2000_ssi0_ce_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce-d", x2000_ssi0_ce_d, 1),
INGENIC_PIN_GROUP("ssi1-dt-c", x2000_ssi1_dt_c, 2),
INGENIC_PIN_GROUP("ssi1-dt-d", x2000_ssi1_dt_d, 2),
INGENIC_PIN_GROUP("ssi1-dt-e", x2000_ssi1_dt_e, 1),
@@ -2565,9 +2739,9 @@ static const struct group_desc x2000_groups[] = {
INGENIC_PIN_GROUP("ssi1-clk-c", x2000_ssi1_clk_c, 2),
INGENIC_PIN_GROUP("ssi1-clk-d", x2000_ssi1_clk_d, 2),
INGENIC_PIN_GROUP("ssi1-clk-e", x2000_ssi1_clk_e, 1),
- INGENIC_PIN_GROUP("ssi1-ce0-c", x2000_ssi1_ce0_c, 2),
- INGENIC_PIN_GROUP("ssi1-ce0-d", x2000_ssi1_ce0_d, 2),
- INGENIC_PIN_GROUP("ssi1-ce0-e", x2000_ssi1_ce0_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce-c", x2000_ssi1_ce_c, 2),
+ INGENIC_PIN_GROUP("ssi1-ce-d", x2000_ssi1_ce_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce-e", x2000_ssi1_ce_e, 1),
INGENIC_PIN_GROUP("mmc0-1bit", x2000_mmc0_1bit, 0),
INGENIC_PIN_GROUP("mmc0-4bit", x2000_mmc0_4bit, 0),
INGENIC_PIN_GROUP("mmc0-8bit", x2000_mmc0_8bit, 0),
@@ -2612,10 +2786,10 @@ static const struct group_desc x2000_groups[] = {
INGENIC_PIN_GROUP("i2s3-data-tx3", x2000_i2s3_data_tx3, 2),
INGENIC_PIN_GROUP("i2s3-clk-tx", x2000_i2s3_clk_tx, 2),
INGENIC_PIN_GROUP("i2s3-sysclk-tx", x2000_i2s3_sysclk_tx, 2),
- INGENIC_PIN_GROUP("dmic0", x2000_dmic0, 0),
- INGENIC_PIN_GROUP("dmic1", x2000_dmic1, 0),
- INGENIC_PIN_GROUP("dmic2", x2000_dmic2, 0),
- INGENIC_PIN_GROUP("dmic3", x2000_dmic3, 0),
+ INGENIC_PIN_GROUP("dmic-if0", x2000_dmic_if0, 0),
+ INGENIC_PIN_GROUP("dmic-if1", x2000_dmic_if1, 0),
+ INGENIC_PIN_GROUP("dmic-if2", x2000_dmic_if2, 0),
+ INGENIC_PIN_GROUP("dmic-if3", x2000_dmic_if3, 0),
INGENIC_PIN_GROUP_FUNCS("cim-data-8bit", x2000_cim_8bit,
x2000_cim_8bit_funcs),
INGENIC_PIN_GROUP("cim-data-12bit", x2000_cim_12bit, 0),
@@ -2670,18 +2844,21 @@ static const char *x2000_uart6_groups[] = { "uart6-data-a", "uart6-data-c", };
static const char *x2000_uart7_groups[] = { "uart7-data-a", "uart7-data-c", };
static const char *x2000_uart8_groups[] = { "uart8-data", };
static const char *x2000_uart9_groups[] = { "uart9-data", };
-static const char *x2000_sfc_groups[] = { "sfc0-d", "sfc0-e", "sfc1", };
+static const char *x2000_sfc_groups[] = {
+ "sfc-data-if0-d", "sfc-data-if0-e", "sfc-data-if1",
+ "sfc-clk-d", "sfc-clk-e", "sfc-ce-d", "sfc-ce-e",
+};
static const char *x2000_ssi0_groups[] = {
"ssi0-dt-b", "ssi0-dt-d",
"ssi0-dr-b", "ssi0-dr-d",
"ssi0-clk-b", "ssi0-clk-d",
- "ssi0-ce0-b", "ssi0-ce0-d",
+ "ssi0-ce-b", "ssi0-ce-d",
};
static const char *x2000_ssi1_groups[] = {
"ssi1-dt-c", "ssi1-dt-d", "ssi1-dt-e",
"ssi1-dr-c", "ssi1-dr-d", "ssi1-dr-e",
"ssi1-clk-c", "ssi1-clk-d", "ssi1-clk-e",
- "ssi1-ce0-c", "ssi1-ce0-d", "ssi1-ce0-e",
+ "ssi1-ce-c", "ssi1-ce-d", "ssi1-ce-e",
};
static const char *x2000_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", "mmc0-8bit", };
static const char *x2000_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
@@ -2711,7 +2888,9 @@ static const char *x2000_i2s3_groups[] = {
"i2s3-data-tx0", "i2s3-data-tx1", "i2s3-data-tx2", "i2s3-data-tx3",
"i2s3-clk-tx", "i2s3-sysclk-tx",
};
-static const char *x2000_dmic_groups[] = { "dmic0", "dmic1", "dmic2", "dmic3", };
+static const char *x2000_dmic_groups[] = {
+ "dmic-if0", "dmic-if1", "dmic-if2", "dmic-if3",
+};
static const char *x2000_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
static const char *x2000_lcd_groups[] = {
"lcd-tft-8bit", "lcd-tft-16bit", "lcd-tft-18bit", "lcd-tft-24bit",
@@ -2802,6 +2981,216 @@ static const struct ingenic_chip_info x2000_chip_info = {
.pull_downs = x2000_pull_downs,
};
+static const u32 x2100_pull_ups[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0xc7fe3f3f, 0x0fbf003f,
+};
+
+static const u32 x2100_pull_downs[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0x00000000, 0x0fbf003f,
+};
+
+static int x2100_mac_pins[] = {
+ 0x4b, 0x47, 0x46, 0x4a, 0x43, 0x42, 0x4c, 0x4d, 0x4f, 0x41,
+};
+
+static const struct group_desc x2100_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x2000_uart0_data, 2),
+ INGENIC_PIN_GROUP("uart0-hwflow", x2000_uart0_hwflow, 2),
+ INGENIC_PIN_GROUP("uart1-data", x2000_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x2000_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data", x2000_uart2_data, 0),
+ INGENIC_PIN_GROUP("uart3-data-c", x2000_uart3_data_c, 0),
+ INGENIC_PIN_GROUP("uart3-data-d", x2000_uart3_data_d, 1),
+ INGENIC_PIN_GROUP("uart3-hwflow-c", x2000_uart3_hwflow_c, 0),
+ INGENIC_PIN_GROUP("uart3-hwflow-d", x2000_uart3_hwflow_d, 1),
+ INGENIC_PIN_GROUP("uart4-data-a", x2000_uart4_data_a, 1),
+ INGENIC_PIN_GROUP("uart4-data-c", x2000_uart4_data_c, 3),
+ INGENIC_PIN_GROUP("uart4-hwflow-a", x2000_uart4_hwflow_a, 1),
+ INGENIC_PIN_GROUP("uart4-hwflow-c", x2000_uart4_hwflow_c, 3),
+ INGENIC_PIN_GROUP("uart5-data-a", x2000_uart5_data_a, 1),
+ INGENIC_PIN_GROUP("uart5-data-c", x2000_uart5_data_c, 3),
+ INGENIC_PIN_GROUP("uart6-data-a", x2000_uart6_data_a, 1),
+ INGENIC_PIN_GROUP("uart6-data-c", x2000_uart6_data_c, 3),
+ INGENIC_PIN_GROUP("uart7-data-a", x2000_uart7_data_a, 1),
+ INGENIC_PIN_GROUP("uart7-data-c", x2000_uart7_data_c, 3),
+ INGENIC_PIN_GROUP("uart8-data", x2000_uart8_data, 3),
+ INGENIC_PIN_GROUP("uart9-data", x2000_uart9_data, 3),
+ INGENIC_PIN_GROUP("sfc-data-if0-d", x2000_sfc_data_if0_d, 1),
+ INGENIC_PIN_GROUP("sfc-data-if0-e", x2000_sfc_data_if0_e, 0),
+ INGENIC_PIN_GROUP("sfc-data-if1", x2000_sfc_data_if1, 1),
+ INGENIC_PIN_GROUP("sfc-clk-d", x2000_sfc_clk_d, 1),
+ INGENIC_PIN_GROUP("sfc-clk-e", x2000_sfc_clk_e, 0),
+ INGENIC_PIN_GROUP("sfc-ce-d", x2000_sfc_ce_d, 1),
+ INGENIC_PIN_GROUP("sfc-ce-e", x2000_sfc_ce_e, 0),
+ INGENIC_PIN_GROUP("ssi0-dt-b", x2000_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", x2000_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-b", x2000_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", x2000_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-b", x2000_ssi0_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", x2000_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce-b", x2000_ssi0_ce_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce-d", x2000_ssi0_ce_d, 1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x2000_ssi1_dt_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x2000_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", x2000_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x2000_ssi1_dr_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x2000_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", x2000_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x2000_ssi1_clk_c, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x2000_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e", x2000_ssi1_clk_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce-c", x2000_ssi1_ce_c, 2),
+ INGENIC_PIN_GROUP("ssi1-ce-d", x2000_ssi1_ce_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce-e", x2000_ssi1_ce_e, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit", x2000_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", x2000_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit", x2000_mmc0_8bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", x2000_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x2000_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit", x2000_mmc2_1bit, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit", x2000_mmc2_4bit, 0),
+ INGENIC_PIN_GROUP("emc-8bit-data", x2000_emc_8bit_data, 0),
+ INGENIC_PIN_GROUP("emc-16bit-data", x2000_emc_16bit_data, 0),
+ INGENIC_PIN_GROUP("emc-addr", x2000_emc_addr, 0),
+ INGENIC_PIN_GROUP("emc-rd-we", x2000_emc_rd_we, 0),
+ INGENIC_PIN_GROUP("emc-wait", x2000_emc_wait, 0),
+ INGENIC_PIN_GROUP("emc-cs1", x2000_emc_cs1, 3),
+ INGENIC_PIN_GROUP("emc-cs2", x2000_emc_cs2, 3),
+ INGENIC_PIN_GROUP("i2c0-data", x2000_i2c0, 3),
+ INGENIC_PIN_GROUP("i2c1-data-c", x2000_i2c1_c, 2),
+ INGENIC_PIN_GROUP("i2c1-data-d", x2000_i2c1_d, 1),
+ INGENIC_PIN_GROUP("i2c2-data-b", x2000_i2c2_b, 2),
+ INGENIC_PIN_GROUP("i2c2-data-d", x2000_i2c2_d, 2),
+ INGENIC_PIN_GROUP("i2c2-data-e", x2000_i2c2_e, 1),
+ INGENIC_PIN_GROUP("i2c3-data-a", x2000_i2c3_a, 0),
+ INGENIC_PIN_GROUP("i2c3-data-d", x2000_i2c3_d, 1),
+ INGENIC_PIN_GROUP("i2c4-data-c", x2000_i2c4_c, 1),
+ INGENIC_PIN_GROUP("i2c4-data-d", x2000_i2c4_d, 2),
+ INGENIC_PIN_GROUP("i2c5-data-c", x2000_i2c5_c, 1),
+ INGENIC_PIN_GROUP("i2c5-data-d", x2000_i2c5_d, 1),
+ INGENIC_PIN_GROUP("i2s1-data-tx", x2000_i2s1_data_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-data-rx", x2000_i2s1_data_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-tx", x2000_i2s1_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-rx", x2000_i2s1_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-tx", x2000_i2s1_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-rx", x2000_i2s1_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx0", x2000_i2s2_data_rx0, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx1", x2000_i2s2_data_rx1, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx2", x2000_i2s2_data_rx2, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx3", x2000_i2s2_data_rx3, 2),
+ INGENIC_PIN_GROUP("i2s2-clk-rx", x2000_i2s2_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-sysclk-rx", x2000_i2s2_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx0", x2000_i2s3_data_tx0, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx1", x2000_i2s3_data_tx1, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx2", x2000_i2s3_data_tx2, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx3", x2000_i2s3_data_tx3, 2),
+ INGENIC_PIN_GROUP("i2s3-clk-tx", x2000_i2s3_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s3-sysclk-tx", x2000_i2s3_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("dmic-if0", x2000_dmic_if0, 0),
+ INGENIC_PIN_GROUP("dmic-if1", x2000_dmic_if1, 0),
+ INGENIC_PIN_GROUP("dmic-if2", x2000_dmic_if2, 0),
+ INGENIC_PIN_GROUP("dmic-if3", x2000_dmic_if3, 0),
+ INGENIC_PIN_GROUP_FUNCS("cim-data-8bit", x2000_cim_8bit,
+ x2000_cim_8bit_funcs),
+ INGENIC_PIN_GROUP("cim-data-12bit", x2000_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-tft-8bit", x2000_lcd_tft_8bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-16bit", x2000_lcd_tft_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-18bit", x2000_lcd_tft_18bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-24bit", x2000_lcd_tft_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x2000_lcd_slcd_8bit, 2),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x2000_lcd_tft_16bit, 2),
+ INGENIC_PIN_GROUP("pwm0-c", x2000_pwm_pwm0_c, 0),
+ INGENIC_PIN_GROUP("pwm0-d", x2000_pwm_pwm0_d, 2),
+ INGENIC_PIN_GROUP("pwm1-c", x2000_pwm_pwm1_c, 0),
+ INGENIC_PIN_GROUP("pwm1-d", x2000_pwm_pwm1_d, 2),
+ INGENIC_PIN_GROUP("pwm2-c", x2000_pwm_pwm2_c, 0),
+ INGENIC_PIN_GROUP("pwm2-e", x2000_pwm_pwm2_e, 1),
+ INGENIC_PIN_GROUP("pwm3-c", x2000_pwm_pwm3_c, 0),
+ INGENIC_PIN_GROUP("pwm3-e", x2000_pwm_pwm3_e, 1),
+ INGENIC_PIN_GROUP("pwm4-c", x2000_pwm_pwm4_c, 0),
+ INGENIC_PIN_GROUP("pwm4-e", x2000_pwm_pwm4_e, 1),
+ INGENIC_PIN_GROUP("pwm5-c", x2000_pwm_pwm5_c, 0),
+ INGENIC_PIN_GROUP("pwm5-e", x2000_pwm_pwm5_e, 1),
+ INGENIC_PIN_GROUP("pwm6-c", x2000_pwm_pwm6_c, 0),
+ INGENIC_PIN_GROUP("pwm6-e", x2000_pwm_pwm6_e, 1),
+ INGENIC_PIN_GROUP("pwm7-c", x2000_pwm_pwm7_c, 0),
+ INGENIC_PIN_GROUP("pwm7-e", x2000_pwm_pwm7_e, 1),
+ INGENIC_PIN_GROUP("pwm8", x2000_pwm_pwm8, 0),
+ INGENIC_PIN_GROUP("pwm9", x2000_pwm_pwm9, 0),
+ INGENIC_PIN_GROUP("pwm10", x2000_pwm_pwm10, 0),
+ INGENIC_PIN_GROUP("pwm11", x2000_pwm_pwm11, 0),
+ INGENIC_PIN_GROUP("pwm12", x2000_pwm_pwm12, 0),
+ INGENIC_PIN_GROUP("pwm13", x2000_pwm_pwm13, 0),
+ INGENIC_PIN_GROUP("pwm14", x2000_pwm_pwm14, 0),
+ INGENIC_PIN_GROUP("pwm15", x2000_pwm_pwm15, 0),
+ INGENIC_PIN_GROUP("mac", x2100_mac, 1),
+};
+
+static const char *x2100_mac_groups[] = { "mac", };
+
+static const struct function_desc x2100_functions[] = {
+ { "uart0", x2000_uart0_groups, ARRAY_SIZE(x2000_uart0_groups), },
+ { "uart1", x2000_uart1_groups, ARRAY_SIZE(x2000_uart1_groups), },
+ { "uart2", x2000_uart2_groups, ARRAY_SIZE(x2000_uart2_groups), },
+ { "uart3", x2000_uart3_groups, ARRAY_SIZE(x2000_uart3_groups), },
+ { "uart4", x2000_uart4_groups, ARRAY_SIZE(x2000_uart4_groups), },
+ { "uart5", x2000_uart5_groups, ARRAY_SIZE(x2000_uart5_groups), },
+ { "uart6", x2000_uart6_groups, ARRAY_SIZE(x2000_uart6_groups), },
+ { "uart7", x2000_uart7_groups, ARRAY_SIZE(x2000_uart7_groups), },
+ { "uart8", x2000_uart8_groups, ARRAY_SIZE(x2000_uart8_groups), },
+ { "uart9", x2000_uart9_groups, ARRAY_SIZE(x2000_uart9_groups), },
+ { "sfc", x2000_sfc_groups, ARRAY_SIZE(x2000_sfc_groups), },
+ { "ssi0", x2000_ssi0_groups, ARRAY_SIZE(x2000_ssi0_groups), },
+ { "ssi1", x2000_ssi1_groups, ARRAY_SIZE(x2000_ssi1_groups), },
+ { "mmc0", x2000_mmc0_groups, ARRAY_SIZE(x2000_mmc0_groups), },
+ { "mmc1", x2000_mmc1_groups, ARRAY_SIZE(x2000_mmc1_groups), },
+ { "mmc2", x2000_mmc2_groups, ARRAY_SIZE(x2000_mmc2_groups), },
+ { "emc", x2000_emc_groups, ARRAY_SIZE(x2000_emc_groups), },
+ { "emc-cs1", x2000_cs1_groups, ARRAY_SIZE(x2000_cs1_groups), },
+ { "emc-cs2", x2000_cs2_groups, ARRAY_SIZE(x2000_cs2_groups), },
+ { "i2c0", x2000_i2c0_groups, ARRAY_SIZE(x2000_i2c0_groups), },
+ { "i2c1", x2000_i2c1_groups, ARRAY_SIZE(x2000_i2c1_groups), },
+ { "i2c2", x2000_i2c2_groups, ARRAY_SIZE(x2000_i2c2_groups), },
+ { "i2c3", x2000_i2c3_groups, ARRAY_SIZE(x2000_i2c3_groups), },
+ { "i2c4", x2000_i2c4_groups, ARRAY_SIZE(x2000_i2c4_groups), },
+ { "i2c5", x2000_i2c5_groups, ARRAY_SIZE(x2000_i2c5_groups), },
+ { "i2s1", x2000_i2s1_groups, ARRAY_SIZE(x2000_i2s1_groups), },
+ { "i2s2", x2000_i2s2_groups, ARRAY_SIZE(x2000_i2s2_groups), },
+ { "i2s3", x2000_i2s3_groups, ARRAY_SIZE(x2000_i2s3_groups), },
+ { "dmic", x2000_dmic_groups, ARRAY_SIZE(x2000_dmic_groups), },
+ { "cim", x2000_cim_groups, ARRAY_SIZE(x2000_cim_groups), },
+ { "lcd", x2000_lcd_groups, ARRAY_SIZE(x2000_lcd_groups), },
+ { "pwm0", x2000_pwm0_groups, ARRAY_SIZE(x2000_pwm0_groups), },
+ { "pwm1", x2000_pwm1_groups, ARRAY_SIZE(x2000_pwm1_groups), },
+ { "pwm2", x2000_pwm2_groups, ARRAY_SIZE(x2000_pwm2_groups), },
+ { "pwm3", x2000_pwm3_groups, ARRAY_SIZE(x2000_pwm3_groups), },
+ { "pwm4", x2000_pwm4_groups, ARRAY_SIZE(x2000_pwm4_groups), },
+ { "pwm5", x2000_pwm5_groups, ARRAY_SIZE(x2000_pwm5_groups), },
+ { "pwm6", x2000_pwm6_groups, ARRAY_SIZE(x2000_pwm6_groups), },
+ { "pwm7", x2000_pwm7_groups, ARRAY_SIZE(x2000_pwm7_groups), },
+ { "pwm8", x2000_pwm8_groups, ARRAY_SIZE(x2000_pwm8_groups), },
+ { "pwm9", x2000_pwm9_groups, ARRAY_SIZE(x2000_pwm9_groups), },
+ { "pwm10", x2000_pwm10_groups, ARRAY_SIZE(x2000_pwm10_groups), },
+ { "pwm11", x2000_pwm11_groups, ARRAY_SIZE(x2000_pwm11_groups), },
+ { "pwm12", x2000_pwm12_groups, ARRAY_SIZE(x2000_pwm12_groups), },
+ { "pwm13", x2000_pwm13_groups, ARRAY_SIZE(x2000_pwm13_groups), },
+ { "pwm14", x2000_pwm14_groups, ARRAY_SIZE(x2000_pwm14_groups), },
+ { "pwm15", x2000_pwm15_groups, ARRAY_SIZE(x2000_pwm15_groups), },
+ { "mac", x2100_mac_groups, ARRAY_SIZE(x2100_mac_groups), },
+};
+
+static const struct ingenic_chip_info x2100_chip_info = {
+ .num_chips = 5,
+ .reg_offset = 0x100,
+ .version = ID_X2100,
+ .groups = x2100_groups,
+ .num_groups = ARRAY_SIZE(x2100_groups),
+ .functions = x2100_functions,
+ .num_functions = ARRAY_SIZE(x2100_functions),
+ .pull_ups = x2100_pull_ups,
+ .pull_downs = x2100_pull_downs,
+};
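
The chip_info table above is wired up through the new "ingenic,x2100-pinctrl" match entry later in this patch, whose .data points at &x2100_chip_info. A minimal sketch of how such a lookup is typically done in probe, assuming the generic driver-core helper rather than quoting the driver's exact code:

	#include <linux/property.h>

	/* Sketch: return the matched of_device_id's .data pointer,
	 * e.g. &x2100_chip_info when the node is an x2100. */
	static const struct ingenic_chip_info *ingenic_get_chip_info(struct device *dev)
	{
		return device_get_match_data(dev);
	}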
+
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -3441,17 +3830,17 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
{
if (jzpc->info->version >= ID_X2000) {
switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
+ case GPIO_PULL_UP:
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, true);
break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
+ case GPIO_PULL_DOWN:
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, true);
break;
- case PIN_CONFIG_BIAS_DISABLE:
+ case GPIO_PULL_DIS:
default:
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
@@ -3654,19 +4043,20 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
.reg_stride = 4,
};
-static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
- { .compatible = "ingenic,jz4730-gpio", },
- { .compatible = "ingenic,jz4740-gpio", },
- { .compatible = "ingenic,jz4725b-gpio", },
- { .compatible = "ingenic,jz4750-gpio", },
- { .compatible = "ingenic,jz4755-gpio", },
- { .compatible = "ingenic,jz4760-gpio", },
- { .compatible = "ingenic,jz4770-gpio", },
- { .compatible = "ingenic,jz4775-gpio", },
- { .compatible = "ingenic,jz4780-gpio", },
- { .compatible = "ingenic,x1000-gpio", },
- { .compatible = "ingenic,x1830-gpio", },
- { .compatible = "ingenic,x2000-gpio", },
+static const struct of_device_id ingenic_gpio_of_matches[] __initconst = {
+ { .compatible = "ingenic,jz4730-gpio" },
+ { .compatible = "ingenic,jz4740-gpio" },
+ { .compatible = "ingenic,jz4725b-gpio" },
+ { .compatible = "ingenic,jz4750-gpio" },
+ { .compatible = "ingenic,jz4755-gpio" },
+ { .compatible = "ingenic,jz4760-gpio" },
+ { .compatible = "ingenic,jz4770-gpio" },
+ { .compatible = "ingenic,jz4775-gpio" },
+ { .compatible = "ingenic,jz4780-gpio" },
+ { .compatible = "ingenic,x1000-gpio" },
+ { .compatible = "ingenic,x1830-gpio" },
+ { .compatible = "ingenic,x2000-gpio" },
+ { .compatible = "ingenic,x2100-gpio" },
{},
};
@@ -3759,6 +4149,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
void __iomem *base;
const struct ingenic_chip_info *chip_info;
struct device_node *node;
+ struct regmap_config regmap_config;
unsigned int i;
int err;
@@ -3776,8 +4167,10 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- jzpc->map = devm_regmap_init_mmio(dev, base,
- &ingenic_pinctrl_regmap_config);
+ regmap_config = ingenic_pinctrl_regmap_config;
+ regmap_config.max_register = chip_info->num_chips * chip_info->reg_offset;
+
+ jzpc->map = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(jzpc->map)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(jzpc->map);
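
Worked numbers for the new bound: each GPIO port (chip) occupies reg_offset bytes of register space, so the x2100 above yields max_register = 5 * 0x100 = 0x500, while a 6-port SoC at the same stride would yield 0x600. A sketch of the same arithmetic, under the struct fields this patch defines:

	/* Illustrative: the regmap bound scales with the port count. */
	static unsigned int ingenic_regmap_bound(const struct ingenic_chip_info *info)
	{
		return info->num_chips * info->reg_offset;	/* x2100: 0x500 */
	}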
@@ -3843,7 +4236,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
dev_set_drvdata(dev, jzpc->map);
for_each_child_of_node(dev->of_node, node) {
- if (of_match_node(ingenic_gpio_of_match, node)) {
+ if (of_match_node(ingenic_gpio_of_matches, node)) {
err = ingenic_gpio_probe(jzpc, node);
if (err) {
of_node_put(node);
@@ -3857,7 +4250,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
#define IF_ENABLED(cfg, ptr) PTR_IF(IS_ENABLED(cfg), (ptr))
-static const struct of_device_id ingenic_pinctrl_of_match[] = {
+static const struct of_device_id ingenic_pinctrl_of_matches[] = {
{
.compatible = "ingenic,jz4730-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4730, &jz4730_chip_info)
@@ -3922,13 +4315,17 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.compatible = "ingenic,x2000e-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_X2000, &x2000_chip_info)
},
+ {
+ .compatible = "ingenic,x2100-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X2100, &x2100_chip_info)
+ },
{ /* sentinel */ },
};
static struct platform_driver ingenic_pinctrl_driver = {
.driver = {
.name = "pinctrl-ingenic",
- .of_match_table = ingenic_pinctrl_of_match,
+ .of_match_table = ingenic_pinctrl_of_matches,
},
};
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
new file mode 100644
index 000000000000..2bce563d5b8b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -0,0 +1,1731 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Intel Corporation */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "pinmux.h"
+
+/* GPIO data registers' offsets */
+#define KEEMBAY_GPIO_DATA_OUT 0x000
+#define KEEMBAY_GPIO_DATA_IN 0x020
+#define KEEMBAY_GPIO_DATA_IN_RAW 0x040
+#define KEEMBAY_GPIO_DATA_HIGH 0x060
+#define KEEMBAY_GPIO_DATA_LOW 0x080
+
+/* GPIO Interrupt and mode registers' offsets */
+#define KEEMBAY_GPIO_INT_CFG 0x000
+#define KEEMBAY_GPIO_MODE 0x070
+
+/* GPIO mode register bit fields */
+#define KEEMBAY_GPIO_MODE_PULLUP_MASK GENMASK(13, 12)
+#define KEEMBAY_GPIO_MODE_DRIVE_MASK GENMASK(8, 7)
+#define KEEMBAY_GPIO_MODE_INV_MASK GENMASK(5, 4)
+#define KEEMBAY_GPIO_MODE_SELECT_MASK GENMASK(2, 0)
+#define KEEMBAY_GPIO_MODE_DIR_OVR BIT(15)
+#define KEEMBAY_GPIO_MODE_REN BIT(11)
+#define KEEMBAY_GPIO_MODE_SCHMITT_EN BIT(10)
+#define KEEMBAY_GPIO_MODE_SLEW_RATE BIT(9)
+#define KEEMBAY_GPIO_IRQ_ENABLE BIT(7)
+#define KEEMBAY_GPIO_MODE_DIR BIT(3)
+#define KEEMBAY_GPIO_MODE_DEFAULT 0x7
+#define KEEMBAY_GPIO_MODE_INV_VAL 0x3
+
+#define KEEMBAY_GPIO_DISABLE 0
+#define KEEMBAY_GPIO_PULL_UP 1
+#define KEEMBAY_GPIO_PULL_DOWN 2
+#define KEEMBAY_GPIO_BUS_HOLD 3
+#define KEEMBAY_GPIO_NUM_IRQ 8
+#define KEEMBAY_GPIO_MAX_PER_IRQ 4
+#define KEEMBAY_GPIO_MAX_PER_REG 32
+#define KEEMBAY_GPIO_MIN_STRENGTH 2
+#define KEEMBAY_GPIO_MAX_STRENGTH 12
+#define KEEMBAY_GPIO_SENSE_LOW (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)
+
+/* GPIO reg address calculation */
+#define KEEMBAY_GPIO_REG_OFFSET(pin) ((pin) * 4)
+
+/**
+ * struct keembay_mux_desc - Mux properties of each GPIO pin
+ * @mode: Pin mode when operating in this function
+ * @name: Pin function name
+ */
+struct keembay_mux_desc {
+ u8 mode;
+ const char *name;
+};
+
+#define KEEMBAY_PIN_DESC(pin_number, pin_name, ...) { \
+ .number = pin_number, \
+ .name = pin_name, \
+ .drv_data = &(struct keembay_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+} \
+
+#define KEEMBAY_MUX(pin_mode, pin_function) { \
+ .mode = pin_mode, \
+ .name = pin_function, \
+} \
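
To make the resulting table shape concrete, here is what a single descriptor built from these two macros expands to, assuming the pinctrl core's struct pinctrl_pin_desc fields (number, name, drv_data):

	/* Expansion sketch of KEEMBAY_PIN_DESC(0, "GPIO0", KEEMBAY_MUX(0x0, "I2S0_M0")):
	 * drv_data points at a sentinel-terminated keembay_mux_desc array. */
	static const struct pinctrl_pin_desc example_pin = {
		.number = 0,
		.name = "GPIO0",
		.drv_data = &(struct keembay_mux_desc[]) {
			{ .mode = 0x0, .name = "I2S0_M0" },
			{ }	/* sentinel */
		},
	};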
+
+/**
+ * struct keembay_gpio_irq - Configuration of each GPIO interrupt source
+ * @source: Interrupt source number (0 - 7)
+ * @line: Actual interrupt line number
+ * @pins: Array of GPIO pins using this interrupt line
+ * @trigger: Interrupt trigger type for this line
+ * @num_share: Number of pins currently sharing this interrupt line
+ */
+struct keembay_gpio_irq {
+ unsigned int source;
+ unsigned int line;
+ unsigned int pins[KEEMBAY_GPIO_MAX_PER_IRQ];
+ unsigned int trigger;
+ unsigned int num_share;
+};
+
+/**
+ * struct keembay_pinctrl - Intel Keembay pinctrl structure
+ * @pctrl: Pointer to the pin controller device
+ * @base0: First register base address
+ * @base1: Second register base address
+ * @dev: Pointer to the device structure
+ * @chip: GPIO chip used by this pin controller
+ * @soc: Pin control configuration data based on SoC
+ * @lock: Spinlock protecting the various GPIO config register accesses
+ * @ngroups: Number of pin groups available
+ * @nfuncs: Number of pin functions available
+ * @npins: Number of GPIO pins available
+ * @irq: Per-source interrupt configuration (see struct keembay_gpio_irq)
+ * @max_gpios_level_type: Stores the max level trigger type
+ * @max_gpios_edge_type: Stores the max edge trigger type
+ */
+struct keembay_pinctrl {
+ struct pinctrl_dev *pctrl;
+ void __iomem *base0;
+ void __iomem *base1;
+ struct device *dev;
+ struct gpio_chip chip;
+ const struct keembay_pin_soc *soc;
+ raw_spinlock_t lock;
+ unsigned int ngroups;
+ unsigned int nfuncs;
+ unsigned int npins;
+ struct keembay_gpio_irq irq[KEEMBAY_GPIO_NUM_IRQ];
+ int max_gpios_level_type;
+ int max_gpios_edge_type;
+};
+
+/**
+ * struct keembay_pin_soc - Pin control config data based on SoC
+ * @pins: Pin description structure
+ */
+struct keembay_pin_soc {
+ const struct pinctrl_pin_desc *pins;
+};
+
+static const struct pinctrl_pin_desc keembay_pins[] = {
+ KEEMBAY_PIN_DESC(0, "GPIO0",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(1, "GPIO1",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(2, "GPIO2",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(3, "GPIO3",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(4, "GPIO4",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C2_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(5, "GPIO5",
+ KEEMBAY_MUX(0x0, "I2S0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C2_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(6, "GPIO6",
+ KEEMBAY_MUX(0x0, "I2S1_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C3_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(7, "GPIO7",
+ KEEMBAY_MUX(0x0, "I2S1_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "I2C3_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(8, "GPIO8",
+ KEEMBAY_MUX(0x0, "I2S1_M0"),
+ KEEMBAY_MUX(0x1, "I2S1_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS0_M2"),
+ KEEMBAY_MUX(0x3, "UART0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(9, "GPIO9",
+ KEEMBAY_MUX(0x0, "I2S1_M0"),
+ KEEMBAY_MUX(0x1, "I2S1_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "UART0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(10, "GPIO10",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "UART0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(11, "GPIO11",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "SD0_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "UART0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(12, "GPIO12",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "I2S2_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "SPI0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(13, "GPIO13",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "I2S2_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "SPI0_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(14, "GPIO14",
+ KEEMBAY_MUX(0x0, "UART0_M0"),
+ KEEMBAY_MUX(0x1, "I2S3_M1"),
+ KEEMBAY_MUX(0x2, "PWM_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "ETH_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(15, "GPIO15",
+ KEEMBAY_MUX(0x0, "UART0_M0"),
+ KEEMBAY_MUX(0x1, "I2S3_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(16, "GPIO16",
+ KEEMBAY_MUX(0x0, "UART0_M0"),
+ KEEMBAY_MUX(0x1, "I2S3_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(17, "GPIO17",
+ KEEMBAY_MUX(0x0, "UART0_M0"),
+ KEEMBAY_MUX(0x1, "I2S3_M1"),
+ KEEMBAY_MUX(0x2, "I2S3_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(18, "GPIO18",
+ KEEMBAY_MUX(0x0, "UART1_M0"),
+ KEEMBAY_MUX(0x1, "SPI0_M1"),
+ KEEMBAY_MUX(0x2, "I2S3_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(19, "GPIO19",
+ KEEMBAY_MUX(0x0, "UART1_M0"),
+ KEEMBAY_MUX(0x1, "LCD_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "SD1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "LCD_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(20, "GPIO20",
+ KEEMBAY_MUX(0x0, "UART1_M0"),
+ KEEMBAY_MUX(0x1, "LCD_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "SPI1_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(21, "GPIO21",
+ KEEMBAY_MUX(0x0, "UART1_M0"),
+ KEEMBAY_MUX(0x1, "LCD_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(22, "GPIO22",
+ KEEMBAY_MUX(0x0, "I2C0_M0"),
+ KEEMBAY_MUX(0x1, "UART2_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(23, "GPIO23",
+ KEEMBAY_MUX(0x0, "I2C0_M0"),
+ KEEMBAY_MUX(0x1, "UART2_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C1_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(24, "GPIO24",
+ KEEMBAY_MUX(0x0, "I2C1_M0"),
+ KEEMBAY_MUX(0x1, "UART2_M1"),
+ KEEMBAY_MUX(0x2, "DEBUG_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C1_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(25, "GPIO25",
+ KEEMBAY_MUX(0x0, "I2C1_M0"),
+ KEEMBAY_MUX(0x1, "UART2_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C2_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(26, "GPIO26",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2C2_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "DSU_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C2_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(27, "GPIO27",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2C2_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "DSU_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(28, "GPIO28",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2C3_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C1_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS0_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(29, "GPIO29",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2C3_M1"),
+ KEEMBAY_MUX(0x2, "UART0_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I3C2_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(30, "GPIO30",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "I2C4_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(31, "GPIO31",
+ KEEMBAY_MUX(0x0, "SPI0_M0"),
+ KEEMBAY_MUX(0x1, "I2S0_M1"),
+ KEEMBAY_MUX(0x2, "I2C4_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "UART1_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(32, "GPIO32",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "SPI0_M1"),
+ KEEMBAY_MUX(0x2, "UART1_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "PCIE_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(33, "GPIO33",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "SPI0_M1"),
+ KEEMBAY_MUX(0x2, "UART1_M2"),
+ KEEMBAY_MUX(0x3, "PWM_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "PCIE_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(34, "GPIO34",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "SPI0_M1"),
+ KEEMBAY_MUX(0x2, "I2C0_M2"),
+ KEEMBAY_MUX(0x3, "UART1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I2S0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(35, "GPIO35",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "PCIE_M1"),
+ KEEMBAY_MUX(0x2, "I2C0_M2"),
+ KEEMBAY_MUX(0x3, "UART1_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I2S0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(36, "GPIO36",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "I2C1_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I2S0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(37, "GPIO37",
+ KEEMBAY_MUX(0x0, "SD0_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "I2C1_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "I2S0_M5"),
+ KEEMBAY_MUX(0x6, "SLVDS1_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(38, "GPIO38",
+ KEEMBAY_MUX(0x0, "I3C1_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "UART3_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(39, "GPIO39",
+ KEEMBAY_MUX(0x0, "I3C1_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "UART3_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(40, "GPIO40",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "UART3_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(41, "GPIO41",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI3_M1"),
+ KEEMBAY_MUX(0x2, "SPI3_M2"),
+ KEEMBAY_MUX(0x3, "DEBUG_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(42, "GPIO42",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI3_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "CAM_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C4_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(43, "GPIO43",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI3_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2C4_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(44, "GPIO44",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(45, "GPIO45",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "CPR_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(46, "GPIO46",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(47, "GPIO47",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SD1_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(48, "GPIO48",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "UART2_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(49, "GPIO49",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "UART2_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(50, "GPIO50",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "UART2_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(51, "GPIO51",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "UART2_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(52, "GPIO52",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(53, "GPIO53",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(54, "GPIO54",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(55, "GPIO55",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(56, "GPIO56",
+ KEEMBAY_MUX(0x0, "ETH_M0"),
+ KEEMBAY_MUX(0x1, "SPI2_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I2S2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(57, "GPIO57",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "I2S1_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(58, "GPIO58",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(59, "GPIO59",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(60, "GPIO60",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "I3C1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(61, "GPIO61",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SD0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(62, "GPIO62",
+ KEEMBAY_MUX(0x0, "SPI1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(63, "GPIO63",
+ KEEMBAY_MUX(0x0, "I2S1_M0"),
+ KEEMBAY_MUX(0x1, "SPI1_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(64, "GPIO64",
+ KEEMBAY_MUX(0x0, "I2S2_M0"),
+ KEEMBAY_MUX(0x1, "SPI1_M1"),
+ KEEMBAY_MUX(0x2, "ETH_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "UART1_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(65, "GPIO65",
+ KEEMBAY_MUX(0x0, "I3C0_M0"),
+ KEEMBAY_MUX(0x1, "SPI1_M1"),
+ KEEMBAY_MUX(0x2, "SD1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SPI0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(66, "GPIO66",
+ KEEMBAY_MUX(0x0, "I3C0_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "I2C0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SPI0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "CAM_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(67, "GPIO67",
+ KEEMBAY_MUX(0x0, "I3C1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "I2C0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SPI0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2S3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(68, "GPIO68",
+ KEEMBAY_MUX(0x0, "I3C1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "I2C1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SPI0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2S3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(69, "GPIO69",
+ KEEMBAY_MUX(0x0, "I3C2_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "I2C1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SPI0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2S3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(70, "GPIO70",
+ KEEMBAY_MUX(0x0, "I3C2_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SPI0_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2S3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(71, "GPIO71",
+ KEEMBAY_MUX(0x0, "I3C0_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "I2S3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(72, "GPIO72",
+ KEEMBAY_MUX(0x0, "I3C1_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(73, "GPIO73",
+ KEEMBAY_MUX(0x0, "I3C2_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(74, "GPIO74",
+ KEEMBAY_MUX(0x0, "I3C0_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(75, "GPIO75",
+ KEEMBAY_MUX(0x0, "I3C0_M0"),
+ KEEMBAY_MUX(0x1, "ETH_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "SD0_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART2_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(76, "GPIO76",
+ KEEMBAY_MUX(0x0, "I2C2_M0"),
+ KEEMBAY_MUX(0x1, "I3C0_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "ETH_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(77, "GPIO77",
+ KEEMBAY_MUX(0x0, "PCIE_M0"),
+ KEEMBAY_MUX(0x1, "I3C1_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I3C2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(78, "GPIO78",
+ KEEMBAY_MUX(0x0, "PCIE_M0"),
+ KEEMBAY_MUX(0x1, "I3C2_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I3C2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+ KEEMBAY_PIN_DESC(79, "GPIO79",
+ KEEMBAY_MUX(0x0, "PCIE_M0"),
+ KEEMBAY_MUX(0x1, "I2C2_M1"),
+ KEEMBAY_MUX(0x2, "SLVDS1_M2"),
+ KEEMBAY_MUX(0x3, "TPIU_M3"),
+ KEEMBAY_MUX(0x4, "I3C2_M4"),
+ KEEMBAY_MUX(0x5, "LCD_M5"),
+ KEEMBAY_MUX(0x6, "UART3_M6"),
+ KEEMBAY_MUX(0x7, "GPIO_M7")),
+};
+
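+/*
+ * Register access helpers: per-pin registers (such as the GPIO mode
+ * registers) are indexed directly by pin number, while the packed data
+ * registers hold KEEMBAY_GPIO_MAX_PER_REG pins per word, hence the
+ * pin / KEEMBAY_GPIO_MAX_PER_REG indexing below.
+ */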
+static inline u32 keembay_read_reg(void __iomem *base, unsigned int pin)
+{
+ return readl(base + KEEMBAY_GPIO_REG_OFFSET(pin));
+}
+
+static inline u32 keembay_read_gpio_reg(void __iomem *base, unsigned int pin)
+{
+ return keembay_read_reg(base, pin / KEEMBAY_GPIO_MAX_PER_REG);
+}
+
+static inline u32 keembay_read_pin(void __iomem *base, unsigned int pin)
+{
+ u32 val = keembay_read_gpio_reg(base, pin);
+
+ return !!(val & BIT(pin % KEEMBAY_GPIO_MAX_PER_REG));
+}
+
+static inline void keembay_write_reg(u32 val, void __iomem *base, unsigned int pin)
+{
+ writel(val, base + KEEMBAY_GPIO_REG_OFFSET(pin));
+}
+
+static inline void keembay_write_gpio_reg(u32 val, void __iomem *base, unsigned int pin)
+{
+ keembay_write_reg(val, base, pin / KEEMBAY_GPIO_MAX_PER_REG);
+}
+
+static void keembay_gpio_invert(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ /*
+ * This IP doesn't support falling-edge or low-level interrupt
+ * triggers. Inverting the pin input is used to mimic falling-edge
+ * and low-level support.
+ */
+
+ val |= FIELD_PREP(KEEMBAY_GPIO_MODE_INV_MASK, KEEMBAY_GPIO_MODE_INV_VAL);
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+}
+
+static void keembay_gpio_restore_default(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ /* Clear only the inversion field, preserving the rest of the mode register */
+ val &= ~KEEMBAY_GPIO_MODE_INV_MASK;
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+}
+
+static int keembay_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int pin)
+{
+ struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int val;
+
+ if (pin >= kpc->npins)
+ return -EINVAL;
+
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ val = FIELD_GET(KEEMBAY_GPIO_MODE_SELECT_MASK, val);
+
+ /* As per the pin mux map, modes 0 to 6 are for peripherals */
+ if (val != KEEMBAY_GPIO_MODE_DEFAULT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int keembay_set_mux(struct pinctrl_dev *pctldev, unsigned int fun_sel,
+ unsigned int grp_sel)
+{
+ struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int val;
+ u8 pin_mode;
+ int pin;
+
+ grp = pinctrl_generic_get_group(pctldev, grp_sel);
+ if (!grp)
+ return -EINVAL;
+
+ func = pinmux_generic_get_function(pctldev, fun_sel);
+ if (!func)
+ return -EINVAL;
+
+ /* Change the mode of the pin in the selected group; each group holds exactly one pin */
+ pin = *grp->pins;
+ pin_mode = *(u8 *)(func->data);
+
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ val = u32_replace_bits(val, pin_mode, KEEMBAY_GPIO_MODE_SELECT_MASK);
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static u32 keembay_pinconf_get_pull(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return FIELD_GET(KEEMBAY_GPIO_MODE_PULLUP_MASK, val);
+}
+
+static int keembay_pinconf_set_pull(struct keembay_pinctrl *kpc, unsigned int pin,
+ unsigned int pull)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ val = u32_replace_bits(val, pull, KEEMBAY_GPIO_MODE_PULLUP_MASK);
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static int keembay_pinconf_get_drive(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
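+ /* The field encodes the drive strength in steps of 4; treat a zero field as the minimum strength */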
+ val = FIELD_GET(KEEMBAY_GPIO_MODE_DRIVE_MASK, val) * 4;
+ if (val)
+ return val;
+
+ return KEEMBAY_GPIO_MIN_STRENGTH;
+}
+
+static int keembay_pinconf_set_drive(struct keembay_pinctrl *kpc, unsigned int pin,
+ unsigned int drive)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ unsigned int strength = clamp_val(drive, KEEMBAY_GPIO_MIN_STRENGTH,
+ KEEMBAY_GPIO_MAX_STRENGTH) / 4;
+
+ val = u32_replace_bits(val, strength, KEEMBAY_GPIO_MODE_DRIVE_MASK);
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static int keembay_pinconf_get_slew_rate(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return !!(val & KEEMBAY_GPIO_MODE_SLEW_RATE);
+}
+
+static int keembay_pinconf_set_slew_rate(struct keembay_pinctrl *kpc, unsigned int pin,
+ unsigned int slew_rate)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ if (slew_rate)
+ val |= KEEMBAY_GPIO_MODE_SLEW_RATE;
+ else
+ val &= ~KEEMBAY_GPIO_MODE_SLEW_RATE;
+
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static int keembay_pinconf_get_schmitt(struct keembay_pinctrl *kpc, unsigned int pin)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return !!(val & KEEMBAY_GPIO_MODE_SCHMITT_EN);
+}
+
+static int keembay_pinconf_set_schmitt(struct keembay_pinctrl *kpc, unsigned int pin,
+ unsigned int schmitt_en)
+{
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ if (schmitt_en)
+ val |= KEEMBAY_GPIO_MODE_SCHMITT_EN;
+ else
+ val &= ~KEEMBAY_GPIO_MODE_SCHMITT_EN;
+
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static int keembay_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *cfg)
+{
+ struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*cfg);
+ unsigned int val;
+
+ if (pin >= kpc->npins)
+ return -EINVAL;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (keembay_pinconf_get_pull(kpc, pin) != KEEMBAY_GPIO_DISABLE)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (keembay_pinconf_get_pull(kpc, pin) != KEEMBAY_GPIO_PULL_UP)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (keembay_pinconf_get_pull(kpc, pin) != KEEMBAY_GPIO_PULL_DOWN)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ if (keembay_pinconf_get_pull(kpc, pin) != KEEMBAY_GPIO_BUS_HOLD)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!keembay_pinconf_get_schmitt(kpc, pin))
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ val = keembay_pinconf_get_slew_rate(kpc, pin);
+ *cfg = pinconf_to_config_packed(param, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = keembay_pinconf_get_drive(kpc, pin);
+ *cfg = pinconf_to_config_packed(param, val);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int keembay_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *cfg, unsigned int num_configs)
+{
+ struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int arg, i;
+ int ret = 0;
+
+ if (pin >= kpc->npins)
+ return -EINVAL;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(cfg[i]);
+ arg = pinconf_to_config_argument(cfg[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = keembay_pinconf_set_pull(kpc, pin, KEEMBAY_GPIO_DISABLE);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = keembay_pinconf_set_pull(kpc, pin, KEEMBAY_GPIO_PULL_UP);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = keembay_pinconf_set_pull(kpc, pin, KEEMBAY_GPIO_PULL_DOWN);
+ break;
+
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ ret = keembay_pinconf_set_pull(kpc, pin, KEEMBAY_GPIO_BUS_HOLD);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = keembay_pinconf_set_schmitt(kpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = keembay_pinconf_set_slew_rate(kpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = keembay_pinconf_set_drive(kpc, pin, arg);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static const struct pinctrl_ops keembay_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const struct pinmux_ops keembay_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .gpio_request_enable = keembay_request_gpio,
+ .set_mux = keembay_set_mux,
+};
+
+static const struct pinconf_ops keembay_confops = {
+ .is_generic = true,
+ .pin_config_get = keembay_pinconf_get,
+ .pin_config_set = keembay_pinconf_set,
+};
+
+static struct pinctrl_desc keembay_pinctrl_desc = {
+ .name = "keembay-pinmux",
+ .pctlops = &keembay_pctlops,
+ .pmxops = &keembay_pmxops,
+ .confops = &keembay_confops,
+ .owner = THIS_MODULE,
+};
+
+static int keembay_gpio_get(struct gpio_chip *gc, unsigned int pin)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int val, offset;
+
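+ /* Read the input latch for pins configured as input, the output latch otherwise */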
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ offset = (val & KEEMBAY_GPIO_MODE_DIR) ? KEEMBAY_GPIO_DATA_IN : KEEMBAY_GPIO_DATA_OUT;
+
+ return keembay_read_pin(kpc->base0 + offset, pin);
+}
+
+static void keembay_gpio_set(struct gpio_chip *gc, unsigned int pin, int val)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int reg_val;
+
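+ /* DATA_HIGH and DATA_LOW take a bit mask: a set bit drives the pin high or low respectively */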
+ reg_val = keembay_read_gpio_reg(kpc->base0 + KEEMBAY_GPIO_DATA_OUT, pin);
+ if (val)
+ keembay_write_gpio_reg(reg_val | BIT(pin % KEEMBAY_GPIO_MAX_PER_REG),
+ kpc->base0 + KEEMBAY_GPIO_DATA_HIGH, pin);
+ else
+ keembay_write_gpio_reg(~reg_val | BIT(pin % KEEMBAY_GPIO_MAX_PER_REG),
+ kpc->base0 + KEEMBAY_GPIO_DATA_LOW, pin);
+}
+
+static int keembay_gpio_get_direction(struct gpio_chip *gc, unsigned int pin)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return !!(val & KEEMBAY_GPIO_MODE_DIR);
+}
+
+static int keembay_gpio_set_direction_in(struct gpio_chip *gc, unsigned int pin)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int val;
+
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ val |= KEEMBAY_GPIO_MODE_DIR;
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+
+ return 0;
+}
+
+static int keembay_gpio_set_direction_out(struct gpio_chip *gc,
+ unsigned int pin, int value)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int val;
+
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ val &= ~KEEMBAY_GPIO_MODE_DIR;
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
+ keembay_gpio_set(gc, pin, value);
+
+ return 0;
+}
+
+static void keembay_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ unsigned int kmb_irq = irq_desc_get_irq(desc);
+ unsigned long reg, clump = 0, bit = 0;
+ struct irq_chip *parent_chip;
+ struct keembay_pinctrl *kpc;
+ unsigned int src, pin, val;
+
+ /* Identify GPIO interrupt number from GIC interrupt number */
+ for (src = 0; src < KEEMBAY_GPIO_NUM_IRQ; src++) {
+ if (kmb_irq == gc->irq.parents[src])
+ break;
+ }
+
+ if (src == KEEMBAY_GPIO_NUM_IRQ)
+ return;
+
+ parent_chip = irq_desc_get_chip(desc);
+ kpc = gpiochip_get_data(gc);
+
+ chained_irq_enter(parent_chip, desc);
+ reg = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+
+ /*
+ * Each interrupt line can be shared by up to 4 GPIO pins. The enable
+ * bit and the input value are checked to identify the source of the
+ * interrupt. The checked enable bit positions are 7, 15, 23 and 31.
+ */
+ for_each_set_clump8(bit, clump, &reg, BITS_PER_TYPE(typeof(reg))) {
+ pin = clump & ~KEEMBAY_GPIO_IRQ_ENABLE;
+ val = keembay_read_pin(kpc->base0 + KEEMBAY_GPIO_DATA_IN, pin);
+ kmb_irq = irq_linear_revmap(gc->irq.domain, pin);
+
+ /* Check that the interrupt is enabled and the pin input is asserted */
+ if (val && (clump & KEEMBAY_GPIO_IRQ_ENABLE))
+ generic_handle_irq(kmb_irq);
+ }
+ chained_irq_exit(parent_chip, desc);
+}
+
+static void keembay_gpio_clear_irq(struct irq_data *data, unsigned long pos,
+ u32 src, irq_hw_number_t pin)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned long trig = irqd_get_trigger_type(data);
+ struct keembay_gpio_irq *irq = &kpc->irq[src];
+ unsigned long val;
+
+ /* Check that the slot index (pos / KEEMBAY_GPIO_NUM_IRQ) is within the valid range. */
+ if ((pos / KEEMBAY_GPIO_NUM_IRQ) >= KEEMBAY_GPIO_MAX_PER_IRQ)
+ return;
+
+ /* Preserve the rest of the register, as it carries other interrupts as well. */
+ val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+
+ bitmap_set_value8(&val, 0, pos);
+ keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+
+ irq->num_share--;
+ irq->pins[pos / KEEMBAY_GPIO_NUM_IRQ] = 0;
+
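+ /* Undo the pin inversion that was applied when the interrupt was enabled */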
+ if (trig & IRQ_TYPE_LEVEL_MASK)
+ keembay_gpio_restore_default(kpc, pin);
+
+ if (irq->trigger == IRQ_TYPE_LEVEL_HIGH)
+ kpc->max_gpios_level_type++;
+ else if (irq->trigger == IRQ_TYPE_EDGE_RISING)
+ kpc->max_gpios_edge_type++;
+}
+
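+/* Find a free 8-bit pin slot in the given interrupt source's config register */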
+static int keembay_find_free_slot(struct keembay_pinctrl *kpc, unsigned int src)
+{
+ unsigned long val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+
+ return bitmap_find_free_region(&val, KEEMBAY_GPIO_MAX_PER_REG, 3) / KEEMBAY_GPIO_NUM_IRQ;
+}
+
+static int keembay_find_free_src(struct keembay_pinctrl *kpc, unsigned int trig)
+{
+ int src, type = 0;
+
+ if (trig & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+ else if (trig & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ for (src = 0; src < KEEMBAY_GPIO_NUM_IRQ; src++) {
+ if (kpc->irq[src].trigger != type)
+ continue;
+
+ if (!keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src) ||
+ kpc->irq[src].num_share < KEEMBAY_GPIO_MAX_PER_IRQ)
+ return src;
+ }
+
+ return -EBUSY;
+}
+
+static void keembay_gpio_set_irq(struct keembay_pinctrl *kpc, int src,
+ int slot, irq_hw_number_t pin)
+{
+ unsigned long val = pin | KEEMBAY_GPIO_IRQ_ENABLE;
+ struct keembay_gpio_irq *irq = &kpc->irq[src];
+ unsigned long flags, reg;
+
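+ /* Program the pin number together with its enable bit into the chosen 8-bit slot */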
+ raw_spin_lock_irqsave(&kpc->lock, flags);
+ reg = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+ bitmap_set_value8(&reg, val, slot * 8);
+ keembay_write_reg(reg, kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+ raw_spin_unlock_irqrestore(&kpc->lock, flags);
+
+ if (irq->trigger == IRQ_TYPE_LEVEL_HIGH)
+ kpc->max_gpios_level_type--;
+ else if (irq->trigger == IRQ_TYPE_EDGE_RISING)
+ kpc->max_gpios_edge_type--;
+
+ irq->source = src;
+ irq->pins[slot] = pin;
+ irq->num_share++;
+}
+
+static void keembay_gpio_irq_enable(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ unsigned int trig = irqd_get_trigger_type(data);
+ irq_hw_number_t pin = irqd_to_hwirq(data);
+ int src, slot;
+
+ /* Check which interrupt source and slot are available */
+ src = keembay_find_free_src(kpc, trig);
+ if (src < 0)
+ return;
+
+ slot = keembay_find_free_slot(kpc, src);
+ if (slot < 0)
+ return;
+
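+ /* Invert the pin input so that falling-edge/low-level triggers behave as rising/high */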
+ if (trig & KEEMBAY_GPIO_SENSE_LOW)
+ keembay_gpio_invert(kpc, pin);
+
+ keembay_gpio_set_irq(kpc, src, slot, pin);
+}
+
+static void keembay_gpio_irq_ack(struct irq_data *data)
+{
+ /*
+ * This empty callback is required by handle_edge_irq(). Acking an IRQ
+ * is not possible from the SoC perspective: the IP is intended for
+ * slow, non-protocol interrupts, all of which are handled as threaded
+ * IRQs. The callback must still be provided because the GPIO IP is
+ * registered with the IRQ framework, and handle_edge_irq() fails
+ * without it.
+ */
+}
+
+static void keembay_gpio_irq_disable(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+ irq_hw_number_t pin = irqd_to_hwirq(data);
+ unsigned long reg, clump = 0, pos = 0;
+ unsigned int src;
+
+ for (src = 0; src < KEEMBAY_GPIO_NUM_IRQ; src++) {
+ reg = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_INT_CFG, src);
+ for_each_set_clump8(pos, clump, &reg, BITS_PER_TYPE(typeof(reg))) {
+ if ((clump & ~KEEMBAY_GPIO_IRQ_ENABLE) == pin) {
+ keembay_gpio_clear_irq(data, pos, src, pin);
+ return;
+ }
+ }
+ }
+}
+
+static int keembay_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
+
+ /* Change EDGE_BOTH to EDGE_RISING in order to claim the IRQ for the power button */
+ if (!kpc->max_gpios_edge_type && (type & IRQ_TYPE_EDGE_BOTH))
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (!kpc->max_gpios_level_type && (type & IRQ_TYPE_LEVEL_MASK))
+ type = IRQ_TYPE_NONE;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(data, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(data, handle_level_irq);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int keembay_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct keembay_pinctrl *kpc = gpiochip_get_data(chip);
+ int ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(kpc->dev), 0, 0, chip->ngpio);
+ if (ret)
+ dev_err_probe(kpc->dev, ret, "failed to add GPIO pin range\n");
+ return ret;
+}
+
+static struct irq_chip keembay_gpio_irqchip = {
+ .name = "keembay-gpio",
+ .irq_enable = keembay_gpio_irq_enable,
+ .irq_disable = keembay_gpio_irq_disable,
+ .irq_set_type = keembay_gpio_irq_set_type,
+ .irq_ack = keembay_gpio_irq_ack,
+};
+
+static int keembay_gpiochip_probe(struct keembay_pinctrl *kpc,
+ struct platform_device *pdev)
+{
+ unsigned int i, level_line = 0, edge_line = 0;
+ struct gpio_chip *gc = &kpc->chip;
+ struct gpio_irq_chip *girq;
+
+ /* Setup GPIO IRQ chip */
+ girq = &kpc->chip.irq;
+ girq->chip = &keembay_gpio_irqchip;
+ girq->parent_handler = keembay_gpio_irq_handler;
+ girq->num_parents = KEEMBAY_GPIO_NUM_IRQ;
+ girq->parents = devm_kcalloc(kpc->dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+
+ if (!girq->parents)
+ return -ENOMEM;
+
+ /* Setup GPIO chip */
+ gc->label = dev_name(kpc->dev);
+ gc->parent = kpc->dev;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->get_direction = keembay_gpio_get_direction;
+ gc->direction_input = keembay_gpio_set_direction_in;
+ gc->direction_output = keembay_gpio_set_direction_out;
+ gc->get = keembay_gpio_get;
+ gc->set = keembay_gpio_set;
+ gc->set_config = gpiochip_generic_config;
+ gc->base = -1;
+ gc->ngpio = kpc->npins;
+ gc->add_pin_ranges = keembay_gpio_add_pin_ranges;
+
+ for (i = 0; i < KEEMBAY_GPIO_NUM_IRQ; i++) {
+ struct keembay_gpio_irq *kmb_irq = &kpc->irq[i];
+ int irq;
+
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq <= 0)
+ continue;
+
+ girq->parents[i] = irq;
+ kmb_irq->line = girq->parents[i];
+ kmb_irq->source = i;
+ kmb_irq->trigger = irq_get_trigger_type(girq->parents[i]);
+ kmb_irq->num_share = 0;
+
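+ /* Count parent lines per trigger type to size the level/edge budgets below */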
+ if (kmb_irq->trigger == IRQ_TYPE_LEVEL_HIGH)
+ level_line++;
+ else
+ edge_line++;
+ }
+
+ kpc->max_gpios_level_type = level_line * KEEMBAY_GPIO_MAX_PER_IRQ;
+ kpc->max_gpios_edge_type = edge_line * KEEMBAY_GPIO_MAX_PER_IRQ;
+
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ return devm_gpiochip_add_data(kpc->dev, gc, kpc);
+}
+
+static int keembay_build_groups(struct keembay_pinctrl *kpc)
+{
+ struct group_desc *grp;
+ unsigned int i;
+
+ kpc->ngroups = kpc->npins;
+ grp = devm_kcalloc(kpc->dev, kpc->ngroups, sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ return -ENOMEM;
+
+ /* Register each pin as its own single-pin group */
+ for (i = 0; i < kpc->ngroups; i++) {
+ const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
+ struct group_desc *kmb_grp = grp + i;
+
+ kmb_grp->name = pdesc->name;
+ kmb_grp->pins = (int *)&pdesc->number;
+ pinctrl_generic_add_group(kpc->pctrl, kmb_grp->name,
+ kmb_grp->pins, 1, NULL);
+ }
+
+ return 0;
+}
+
+static int keembay_pinctrl_reg(struct keembay_pinctrl *kpc, struct device *dev)
+{
+ int ret;
+
+ keembay_pinctrl_desc.pins = keembay_pins;
+ ret = of_property_read_u32(dev->of_node, "ngpios", &kpc->npins);
+ if (ret < 0)
+ return ret;
+ keembay_pinctrl_desc.npins = kpc->npins;
+
+ kpc->pctrl = devm_pinctrl_register(kpc->dev, &keembay_pinctrl_desc, kpc);
+
+ return PTR_ERR_OR_ZERO(kpc->pctrl);
+}
+
+static int keembay_add_functions(struct keembay_pinctrl *kpc,
+ struct function_desc *function)
+{
+ unsigned int i;
+
+ /* Assign the groups for each function */
+ for (i = 0; i < kpc->npins; i++) {
+ const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
+ struct keembay_mux_desc *mux = pdesc->drv_data;
+
+ while (mux->name) {
+ struct function_desc *func;
+ const char **grp;
+ size_t grp_size;
+ u32 j, grp_num;
+
+ for (j = 0; j < kpc->nfuncs; j++) {
+ if (!strcmp(mux->name, function[j].name))
+ break;
+ }
+
+ if (j == kpc->nfuncs)
+ return -EINVAL;
+
+ func = function + j;
+ grp_num = func->num_group_names;
+ grp_size = sizeof(*func->group_names);
+
+ if (!func->group_names) {
+ func->group_names = devm_kcalloc(kpc->dev,
+ grp_num,
+ grp_size,
+ GFP_KERNEL);
+ if (!func->group_names)
+ return -ENOMEM;
+ }
+
+ grp = func->group_names;
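+ /* Advance to the first unused (NULL) entry of the zero-initialised array */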
+ while (*grp)
+ grp++;
+
+ *grp = pdesc->name;
+ mux++;
+ }
+ }
+
+ /* Add all functions */
+ for (i = 0; i < kpc->nfuncs; i++) {
+ pinmux_generic_add_function(kpc->pctrl,
+ function[i].name,
+ function[i].group_names,
+ function[i].num_group_names,
+ function[i].data);
+ }
+
+ return 0;
+}
+
+static int keembay_build_functions(struct keembay_pinctrl *kpc)
+{
+ struct function_desc *keembay_funcs, *new_funcs;
+ int i;
+
+ /* Allocate for the worst case: each pin can expose up to 8 functions */
+ kpc->nfuncs = 0;
+ keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL);
+ if (!keembay_funcs)
+ return -ENOMEM;
+
+ /* Find the total number of functions and each function's properties */
+ for (i = 0; i < kpc->npins; i++) {
+ const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
+ struct keembay_mux_desc *mux = pdesc->drv_data;
+
+ while (mux->name) {
+ struct function_desc *fdesc = keembay_funcs;
+
+ while (fdesc->name) {
+ if (!strcmp(mux->name, fdesc->name)) {
+ fdesc->num_group_names++;
+ break;
+ }
+
+ fdesc++;
+ }
+
+ if (!fdesc->name) {
+ fdesc->name = mux->name;
+ fdesc->num_group_names = 1;
+ fdesc->data = &mux->mode;
+ kpc->nfuncs++;
+ }
+
+ mux++;
+ }
+ }
+
+ /* Reallocate memory based on actual number of functions */
+ new_funcs = krealloc(keembay_funcs, kpc->nfuncs * sizeof(*new_funcs), GFP_KERNEL);
+ if (!new_funcs) {
+ kfree(keembay_funcs);
+ return -ENOMEM;
+ }
+
+ return keembay_add_functions(kpc, new_funcs);
+}
+
+static const struct keembay_pin_soc keembay_data = {
+ .pins = keembay_pins,
+};
+
+static const struct of_device_id keembay_pinctrl_match[] = {
+ { .compatible = "intel,keembay-pinctrl", .data = &keembay_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, keembay_pinctrl_match);
+
+static int keembay_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keembay_pinctrl *kpc;
+ int ret;
+
+ kpc = devm_kzalloc(dev, sizeof(*kpc), GFP_KERNEL);
+ if (!kpc)
+ return -ENOMEM;
+
+ kpc->dev = dev;
+ kpc->soc = device_get_match_data(dev);
+
+ kpc->base0 = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(kpc->base0))
+ return PTR_ERR(kpc->base0);
+
+ kpc->base1 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(kpc->base1))
+ return PTR_ERR(kpc->base1);
+
+ raw_spin_lock_init(&kpc->lock);
+
+ ret = keembay_pinctrl_reg(kpc, dev);
+ if (ret)
+ return ret;
+
+ ret = keembay_build_groups(kpc);
+ if (ret)
+ return ret;
+
+ ret = keembay_build_functions(kpc);
+ if (ret)
+ return ret;
+
+ ret = keembay_gpiochip_probe(kpc, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, kpc);
+
+ return 0;
+}
+
+static struct platform_driver keembay_pinctrl_driver = {
+ .probe = keembay_pinctrl_probe,
+ .driver = {
+ .name = "keembay-pinctrl",
+ .of_match_table = keembay_pinctrl_match,
+ },
+};
+module_platform_driver(keembay_pinctrl_driver);
+
+MODULE_AUTHOR("Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>");
+MODULE_AUTHOR("Vijayakannan Ayyathurai <vijayakannan.ayyathurai@intel.com>");
+MODULE_AUTHOR("Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>");
+MODULE_DESCRIPTION("Intel Keem Bay SoC pinctrl/GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index aa6e72214609..67bec7ea0f8b 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1115,7 +1115,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
{
const char *name = "pinctrl-single,bits";
struct pcs_func_vals *vals;
- int rows, *pins, found = 0, res = -ENOMEM, i, fsel, gsel;
+ int rows, *pins, found = 0, res = -ENOMEM, i, fsel;
int npins_in_row;
struct pcs_function *function = NULL;
@@ -1125,6 +1125,11 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
return -EINVAL;
}
+ if (PCS_HAS_PINCONF) {
+ dev_err(pcs->dev, "pinconf not supported\n");
+ return -ENOTSUPP;
+ }
+
npins_in_row = pcs->width / pcs->bits_per_pin;
vals = devm_kzalloc(pcs->dev,
@@ -1212,29 +1217,19 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
goto free_pins;
}
- gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
- if (gsel < 0) {
- res = gsel;
+ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
+ if (res < 0)
goto free_function;
- }
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (PCS_HAS_PINCONF) {
- dev_err(pcs->dev, "pinconf not supported\n");
- goto free_pingroups;
- }
-
*num_maps = 1;
mutex_unlock(&pcs->mutex);
return 0;
-free_pingroups:
- pinctrl_generic_remove_group(pcs->pctl, gsel);
- *num_maps = 1;
free_function:
pinmux_generic_remove_function(pcs->pctl, fsel);
free_pins:
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 008c83107a3c..5fa2488fae87 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -566,7 +566,7 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
u8 pending[NR_GPIO_REGS];
u8 src[NR_GPIO_REGS] = {0, 0, 0};
unsigned long n, status;
- int ret;
+ int i, ret;
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
&pending, NR_GPIO_REGS);
@@ -576,7 +576,9 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
src, NR_GPIO_REGS);
- status = *(unsigned long *)pending;
+ BUILD_BUG_ON(NR_GPIO_REGS > sizeof(status));
+ for (i = 0, status = 0; i < NR_GPIO_REGS; i++)
+ status |= (unsigned long)pending[i] << (i * 8);
for_each_set_bit(n, &status, gc->ngpio) {
handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
stmfx_pinctrl_irq_toggle_trigger(pctl, n);
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 5fb924a2eedd..a96af8a76a7a 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1028,6 +1028,7 @@ static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev,
break;
}
case PIN_CONFIG_IOSTANDARD:
+ case PIN_CONFIG_POWER_SOURCE:
arg = zynq_pinconf_iostd_get(reg);
break;
default:
@@ -1078,6 +1079,7 @@ static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_IOSTANDARD:
+ case PIN_CONFIG_POWER_SOURCE:
if (arg <= zynq_iostd_min || arg >= zynq_iostd_max) {
dev_warn(pctldev->dev,
"unsupported IO standard '%u'\n",
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index bbde676b7313..e14012209992 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -866,15 +866,6 @@ static int zynqmp_pinctrl_probe(struct platform_device *pdev)
return ret;
}
-static int zynqmp_pinctrl_remove(struct platform_device *pdev)
-{
- struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pctrl->pctrl);
-
- return 0;
-}
-
static const struct of_device_id zynqmp_pinctrl_of_match[] = {
{ .compatible = "xlnx,zynqmp-pinctrl" },
{ }
@@ -887,7 +878,6 @@ static struct platform_driver zynqmp_pinctrl_driver = {
.of_match_table = zynqmp_pinctrl_of_match,
},
.probe = zynqmp_pinctrl_probe,
- .remove = zynqmp_pinctrl_remove,
};
module_platform_driver(zynqmp_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index cad4e60df618..32ea2a8ec02b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -88,6 +88,14 @@ config PINCTRL_MSM8960
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8960 platform.
+config PINCTRL_MDM9607
+ tristate "Qualcomm 9607 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 9607 platform.
+
config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver"
depends on OF
@@ -256,6 +264,15 @@ config PINCTRL_SDX55
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SDX55 platform.
+config PINCTRL_SM6115
+ tristate "Qualcomm Technologies Inc SM6115,SM4250 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM6115 and SM4250 platforms.
+
config PINCTRL_SM6125
tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
depends on OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index d696fe2789bb..7a12e8cd2fba 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
obj-$(CONFIG_PINCTRL_QCS404) += pinctrl-qcs404.o
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
+obj-$(CONFIG_PINCTRL_MDM9607) += pinctrl-mdm9607.o
obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
+obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
new file mode 100644
index 000000000000..d622b3df0fe7
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ *
+ * based on pinctrl-msm8916.c
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc mdm9607_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "SDC1_CLK"),
+ PINCTRL_PIN(81, "SDC1_CMD"),
+ PINCTRL_PIN(82, "SDC1_DATA"),
+ PINCTRL_PIN(83, "SDC2_CLK"),
+ PINCTRL_PIN(84, "SDC2_CMD"),
+ PINCTRL_PIN(85, "SDC2_DATA"),
+ PINCTRL_PIN(86, "QDSD_CLK"),
+ PINCTRL_PIN(87, "QDSD_CMD"),
+ PINCTRL_PIN(88, "QDSD_DATA0"),
+ PINCTRL_PIN(89, "QDSD_DATA1"),
+ PINCTRL_PIN(90, "QDSD_DATA2"),
+ PINCTRL_PIN(91, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+
+static const unsigned int sdc1_clk_pins[] = { 80 };
+static const unsigned int sdc1_cmd_pins[] = { 81 };
+static const unsigned int sdc1_data_pins[] = { 82 };
+static const unsigned int sdc2_clk_pins[] = { 83 };
+static const unsigned int sdc2_cmd_pins[] = { 84 };
+static const unsigned int sdc2_data_pins[] = { 85 };
+static const unsigned int qdsd_clk_pins[] = { 86 };
+static const unsigned int qdsd_cmd_pins[] = { 87 };
+static const unsigned int qdsd_data0_pins[] = { 88 };
+static const unsigned int qdsd_data1_pins[] = { 89 };
+static const unsigned int qdsd_data2_pins[] = { 90 };
+static const unsigned int qdsd_data3_pins[] = { 91 };
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+enum mdm9607_functions {
+ msm_mux_adsp_ext,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac_to_gpio_native,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_atest_tsens,
+ msm_mux_backlight_en_b,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp2_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_codec_int,
+ msm_mux_codec_rst,
+ msm_mux_coex_uart,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_a_d_8_b,
+ msm_mux_ebi2_lcd,
+ msm_mux_ebi2_lcd_cs_n_b,
+ msm_mux_ebi2_lcd_te_b,
+ msm_mux_eth_irq,
+ msm_mux_eth_rst,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gmac_mdio,
+ msm_mux_gpio,
+ msm_mux_gsm0_tx,
+ msm_mux_lcd_rst,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_modem_tsync,
+ msm_mux_nav_ptp_pps_in_a,
+ msm_mux_nav_ptp_pps_in_b,
+ msm_mux_nav_tsync_out_a,
+ msm_mux_nav_tsync_out_b,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s_data0_a,
+ msm_mux_pri_mi2s_data1_a,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_sck_a,
+ msm_mux_pri_mi2s_ws_a,
+ msm_mux_prng_rosc,
+ msm_mux_ptp_pps_out_a,
+ msm_mux_ptp_pps_out_b,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_rcm_marker1,
+ msm_mux_rcm_marker2,
+ msm_mux_sd_write,
+ msm_mux_sec_mi2s,
+ msm_mux_sensor_en,
+ msm_mux_sensor_int2,
+ msm_mux_sensor_int3,
+ msm_mux_sensor_rst,
+ msm_mux_ssbi1,
+ msm_mux_ssbi2,
+ msm_mux_touch_rst,
+ msm_mux_ts_int,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_wlan_en1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio1", "gpio24",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio2",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio2", "gpio15",
+};
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim2_groups[] = {
+ "gpio4", "gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio6",
+};
+static const char * const sensor_int2_groups[] = {
+ "gpio8",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ebi2_lcd_groups[] = {
+ "gpio8", "gpio11", "gpio74", "gpio78",
+};
+static const char * const m_voc_groups[] = {
+ "gpio8", "gpio78",
+};
+static const char * const sensor_int3_groups[] = {
+ "gpio9",
+};
+static const char * const sensor_en_groups[] = {
+ "gpio10",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const ebi2_a_groups[] = {
+ "gpio10",
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio10", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio46",
+ "gpio47", "gpio48", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55",
+ "gpio58", "gpio59",
+};
+static const char * const sensor_rst_groups[] = {
+ "gpio11",
+};
+static const char * const blsp2_spi_groups[] = {
+ "gpio11", "gpio13", "gpio77",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const blsp_uim1_groups[] = {
+ "gpio12", "gpio13",
+};
+static const char * const blsp3_spi_groups[] = {
+ "gpio12", "gpio26", "gpio76",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio12",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio14",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const rcm_marker1_groups[] = {
+ "gpio18",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio18",
+};
+static const char * const rcm_marker2_groups[] = {
+ "gpio19",
+};
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio19",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const pri_mi2s_ws_a_groups[] = {
+ "gpio20",
+};
+static const char * const ebi2_lcd_te_b_groups[] = {
+ "gpio20",
+};
+static const char * const blsp1_spi_groups[] = {
+ "gpio20", "gpio21", "gpio78",
+};
+static const char * const backlight_en_b_groups[] = {
+ "gpio21",
+};
+static const char * const pri_mi2s_data0_a_groups[] = {
+ "gpio21",
+};
+static const char * const pri_mi2s_data1_a_groups[] = {
+ "gpio22",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const ebi2_a_d_8_b_groups[] = {
+ "gpio22",
+};
+static const char * const pri_mi2s_sck_a_groups[] = {
+ "gpio23",
+};
+static const char * const ebi2_lcd_cs_n_b_groups[] = {
+ "gpio23",
+};
+static const char * const touch_rst_groups[] = {
+ "gpio24",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio24",
+};
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio24",
+};
+static const char * const ts_int_groups[] = {
+ "gpio25",
+};
+static const char * const sd_write_groups[] = {
+ "gpio25",
+};
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio25",
+};
+static const char * const codec_rst_groups[] = {
+ "gpio26",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio26",
+};
+static const char * const atest_combodac_to_gpio_native_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio41", "gpio45", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio54", "gpio55", "gpio57", "gpio59",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio27",
+};
+static const char * const gmac_mdio_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio27",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio28",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio28",
+};
+static const char * const eth_irq_groups[] = {
+ "gpio29",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio29",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio29",
+};
+static const char * const eth_rst_groups[] = {
+ "gpio30",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio30",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio30",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio31",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio32",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio33",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio34",
+};
+static const char * const gcc_plltest_groups[] = {
+ "gpio34", "gpio35",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio35",
+};
+static const char * const coex_uart_groups[] = {
+ "gpio36", "gpio37",
+};
+static const char * const codec_int_groups[] = {
+ "gpio38",
+};
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio38",
+};
+static const char * const atest_bbrx1_groups[] = {
+ "gpio39",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio40",
+};
+static const char * const atest_bbrx0_groups[] = {
+ "gpio40",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio42",
+};
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio44",
+};
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio44",
+};
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio45",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio49",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio50",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio51",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio53",
+};
+static const char * const nav_tsync_out_a_groups[] = {
+ "gpio53",
+};
+static const char * const nav_ptp_pps_in_a_groups[] = {
+ "gpio53",
+};
+static const char * const ptp_pps_out_a_groups[] = {
+ "gpio53",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio55",
+};
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio56",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio57",
+};
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio57",
+};
+static const char * const ssbi1_groups[] = {
+ "gpio58",
+};
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio58",
+};
+static const char * const ssbi2_groups[] = {
+ "gpio59",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio60",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio61",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio62",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio63",
+};
+static const char * const atest_char_groups[] = {
+ "gpio64",
+};
+static const char * const ebi0_wrcdc_groups[] = {
+ "gpio70",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio72",
+};
+static const char * const gcc_tlmm_groups[] = {
+ "gpio72",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio73",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio73",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio73",
+};
+static const char * const lcd_rst_groups[] = {
+ "gpio74",
+};
+static const char * const wlan_en1_groups[] = {
+ "gpio75",
+};
+static const char * const nav_tsync_out_b_groups[] = {
+ "gpio75",
+};
+static const char * const nav_ptp_pps_in_b_groups[] = {
+ "gpio75",
+};
+static const char * const ptp_pps_out_b_groups[] = {
+ "gpio75",
+};
+static const char * const pbs0_groups[] = {
+ "gpio76",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+};
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio76",
+};
+static const char * const pbs1_groups[] = {
+ "gpio77",
+};
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio77",
+};
+static const char * const pbs2_groups[] = {
+ "gpio78",
+};
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio78",
+};
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio79",
+};
+
+static const struct msm_function mdm9607_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac_to_gpio_native),
+ FUNCTION(atest_gpsadc_dtest0_native),
+ FUNCTION(atest_gpsadc_dtest1_native),
+ FUNCTION(atest_tsens),
+ FUNCTION(backlight_en_b),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp2_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_uart6),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(codec_int),
+ FUNCTION(codec_rst),
+ FUNCTION(coex_uart),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_a_d_8_b),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ebi2_lcd_cs_n_b),
+ FUNCTION(ebi2_lcd_te_b),
+ FUNCTION(eth_irq),
+ FUNCTION(eth_rst),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gmac_mdio),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(lcd_rst),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_ptp_pps_in_a),
+ FUNCTION(nav_ptp_pps_in_b),
+ FUNCTION(nav_tsync_out_a),
+ FUNCTION(nav_tsync_out_b),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s_data0_a),
+ FUNCTION(pri_mi2s_data1_a),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_sck_a),
+ FUNCTION(pri_mi2s_ws_a),
+ FUNCTION(prng_rosc),
+ FUNCTION(ptp_pps_out_a),
+ FUNCTION(ptp_pps_out_b),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(rcm_marker1),
+ FUNCTION(rcm_marker2),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sensor_en),
+ FUNCTION(sensor_int2),
+ FUNCTION(sensor_int3),
+ FUNCTION(sensor_rst),
+ FUNCTION(ssbi1),
+ FUNCTION(ssbi2),
+ FUNCTION(touch_rst),
+ FUNCTION(ts_int),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(wlan_en1)
+};
+
+static const struct msm_pingroup mdm9607_groups[] = {
+ PINGROUP(0, blsp_uart3, blsp_spi3, _, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(1, blsp_uart3, blsp_spi3, _, _, _, _, _, qdss_tracedata_a, bimc_dte1),
+ PINGROUP(2, blsp_uart3, blsp_i2c3, blsp_spi3, _, _, _, _, _, qdss_traceclk_a),
+ PINGROUP(3, blsp_uart3, blsp_i2c3, blsp_spi3, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(8, blsp_spi5, blsp_uart5, ebi2_lcd, m_voc, _, _, _, _, _),
+ PINGROUP(9, blsp_spi5, blsp_uart5, _, _, _, _, _, _, _),
+ PINGROUP(10, blsp_spi5, blsp_i2c5, blsp_uart5, ebi2_a, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(11, blsp_spi5, blsp_i2c5, blsp_uart5, blsp2_spi, ebi2_lcd, _, _, _, _),
+ PINGROUP(12, blsp_spi1, blsp_uart1, blsp_uim1, blsp3_spi, gcc_gp2_clk_b, _, _, _, _),
+ PINGROUP(13, blsp_spi1, blsp_uart1, blsp_uim1, blsp2_spi, gcc_gp3_clk_b, _, _, _, _),
+ PINGROUP(14, blsp_spi1, blsp_uart1, blsp_i2c1, gcc_gp1_clk_b, _, _, _, _, _),
+ PINGROUP(15, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi4, blsp_uart4, _, _, _, _, _, _, _),
+ PINGROUP(17, blsp_spi4, blsp_uart4, _, _, _, _, _, _, _),
+ PINGROUP(18, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(19, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(20, blsp_spi6, blsp_uart6, pri_mi2s_ws_a, ebi2_lcd_te_b, blsp1_spi, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(21, blsp_spi6, blsp_uart6, pri_mi2s_data0_a, blsp1_spi, _, _, _, _, _),
+ PINGROUP(22, blsp_spi6, blsp_uart6, pri_mi2s_data1_a, blsp_i2c6, ebi2_a_d_8_b, _, _, _, _),
+ PINGROUP(23, blsp_spi6, blsp_uart6, pri_mi2s_sck_a, blsp_i2c6, ebi2_lcd_cs_n_b, _, _, _, _),
+ PINGROUP(24, pri_mi2s_mclk_a, _, pwr_nav_enabled_a, _, _, _, _, qdss_tracedata_a,
+ bimc_dte1),
+ PINGROUP(25, sd_write, _, pwr_crypto_enabled_a, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(26, blsp3_spi, adsp_ext, _, qdss_tracedata_a, _, atest_combodac_to_gpio_native, _,
+ _, _),
+ PINGROUP(27, uim2_data, gmac_mdio, gcc_gp1_clk_a, _, _, atest_combodac_to_gpio_native, _, _,
+ _),
+ PINGROUP(28, uim2_clk, gmac_mdio, gcc_gp2_clk_a, _, _, atest_combodac_to_gpio_native, _, _,
+ _),
+ PINGROUP(29, uim2_reset, gcc_gp3_clk_a, _, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(30, uim2_present, prng_rosc, _, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(31, uim1_data, _, _, atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(32, uim1_clk, _, _, atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(33, uim1_reset, _, _, atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(34, uim1_present, gcc_plltest, _, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(35, uim_batt, gcc_plltest, _, atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(36, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(37, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(38, _, _, _, qdss_cti_trig_in_a0, _, _, _, _, _),
+ PINGROUP(39, _, _, _, qdss_tracedata_b, _, atest_bbrx1, _, _, _),
+ PINGROUP(40, _, cri_trng0, _, _, _, _, qdss_tracedata_b, _, atest_bbrx0),
+ PINGROUP(41, _, _, _, _, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _),
+ PINGROUP(42, _, cri_trng, _, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(43, _, _, _, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(44, _, _, qdss_cti_trig_in_b0, _, atest_gpsadc_dtest0_native, _, _, _, _),
+ PINGROUP(45, _, _, qdss_cti_trig_out_b0, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(46, _, _, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(47, _, _, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(48, _, _, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(49, _, _, qdss_tracectl_b, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(50, _, _, qdss_traceclk_b, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(51, _, pa_indicator, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _, _,
+ _),
+ PINGROUP(52, _, _, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _, _, _),
+ PINGROUP(53, _, modem_tsync, nav_tsync_out_a, nav_ptp_pps_in_a, ptp_pps_out_a,
+ qdss_tracedata_b, _, _, _),
+ PINGROUP(54, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(55, gsm0_tx, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(56, _, _, qdss_cti_trig_in_b1, _, _, _, _, _, _),
+ PINGROUP(57, _, cri_trng1, _, qdss_cti_trig_out_b1, _, atest_combodac_to_gpio_native, _, _,
+ _),
+ PINGROUP(58, _, ssbi1, _, qdss_tracedata_b, _, atest_gpsadc_dtest1_native, _, _, _),
+ PINGROUP(59, _, ssbi2, _, qdss_tracedata_b, _, atest_combodac_to_gpio_native, _, _, _),
+ PINGROUP(60, atest_char3, _, _, _, _, _, _, _, _),
+ PINGROUP(61, atest_char2, _, _, _, _, _, _, _, _),
+ PINGROUP(62, atest_char1, _, _, _, _, _, _, _, _),
+ PINGROUP(63, atest_char0, _, _, _, _, _, _, _, _),
+ PINGROUP(64, atest_char, _, _, _, _, _, _, _, _),
+ PINGROUP(65, _, _, _, _, _, _, _, _, _),
+ PINGROUP(66, _, _, _, _, _, _, _, _, _),
+ PINGROUP(67, _, _, _, _, _, _, _, _, _),
+ PINGROUP(68, _, _, _, _, _, _, _, _, _),
+ PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ PINGROUP(70, _, _, ebi0_wrcdc, _, _, _, _, _, _),
+ PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ PINGROUP(72, ldo_update, _, gcc_tlmm, _, _, _, _, _, _),
+ PINGROUP(73, ldo_en, dbg_out, _, _, _, atest_tsens, _, _, _),
+ PINGROUP(74, ebi2_lcd, _, _, _, _, _, _, _, _),
+ PINGROUP(75, nav_tsync_out_b, nav_ptp_pps_in_b, ptp_pps_out_b, _, qdss_tracedata_a, _, _, _,
+ _),
+ PINGROUP(76, pbs0, sec_mi2s, blsp3_spi, pwr_modem_enabled_a, _, qdss_tracedata_a, _, _, _),
+ PINGROUP(77, pbs1, sec_mi2s, blsp2_spi, pwr_modem_enabled_b, _, qdss_tracedata_a, _, _, _),
+ PINGROUP(78, pbs2, sec_mi2s, blsp1_spi, ebi2_lcd, m_voc, pwr_nav_enabled_b, _,
+ qdss_tracedata_a, _),
+ PINGROUP(79, sec_mi2s, _, pwr_crypto_enabled_b, _, qdss_tracedata_a, _, _, _, _),
+ SDC_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data mdm9607_pinctrl = {
+ .pins = mdm9607_pins,
+ .npins = ARRAY_SIZE(mdm9607_pins),
+ .functions = mdm9607_functions,
+ .nfunctions = ARRAY_SIZE(mdm9607_functions),
+ .groups = mdm9607_groups,
+ .ngroups = ARRAY_SIZE(mdm9607_groups),
+ .ngpios = 80,
+};
+
+static int mdm9607_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &mdm9607_pinctrl);
+}
+
+static const struct of_device_id mdm9607_pinctrl_of_match[] = {
+ { .compatible = "qcom,mdm9607-tlmm", },
+ { }
+};
+
+static struct platform_driver mdm9607_pinctrl_driver = {
+ .driver = {
+ .name = "mdm9607-pinctrl",
+ .of_match_table = mdm9607_pinctrl_of_match,
+ },
+ .probe = mdm9607_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init mdm9607_pinctrl_init(void)
+{
+ return platform_driver_register(&mdm9607_pinctrl_driver);
+}
+arch_initcall(mdm9607_pinctrl_init);
+
+static void __exit mdm9607_pinctrl_exit(void)
+{
+ platform_driver_unregister(&mdm9607_pinctrl_driver);
+}
+module_exit(mdm9607_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm mdm9607 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, mdm9607_pinctrl_of_match);
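The driver above is almost entirely table data; the shared pinctrl-msm core provides all runtime behaviour. As a minimal sketch of the consumer side — the client device, its DT pin state, and the function name are hypothetical; only the generic pinctrl consumer API is real:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/*
 * Hypothetical client of a board DT node wired to "qcom,mdm9607-tlmm":
 * the board would define a pin state (say, blsp_uart3 on gpio0-gpio3)
 * and the client simply selects its "default" state.
 */
static int example_client_probe(struct device *dev)
{
        struct pinctrl *p;

        p = devm_pinctrl_get_select_default(dev);
        if (IS_ERR(p))
                return PTR_ERR(p);      /* pins could not be configured */

        return 0;
}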
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
new file mode 100644
index 000000000000..b3a0161ca377
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const char * const sm6115_tiles[] = {
+ "south",
+ "east",
+ "west"
+};
+
+enum {
+ SOUTH,
+ EAST,
+ WEST
+};
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .tile = _tile, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = _tile, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = WEST, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm6115_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "UFS_RESET"),
+ PINCTRL_PIN(114, "SDC1_RCLK"),
+ PINCTRL_PIN(115, "SDC1_CLK"),
+ PINCTRL_PIN(116, "SDC1_CMD"),
+ PINCTRL_PIN(117, "SDC1_DATA"),
+ PINCTRL_PIN(118, "SDC2_CLK"),
+ PINCTRL_PIN(119, "SDC2_CMD"),
+ PINCTRL_PIN(120, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int ufs_reset_pins[] = { 113 };
+static const unsigned int sdc1_rclk_pins[] = { 114 };
+static const unsigned int sdc1_clk_pins[] = { 115 };
+static const unsigned int sdc1_cmd_pins[] = { 116 };
+static const unsigned int sdc1_data_pins[] = { 117 };
+static const unsigned int sdc2_clk_pins[] = { 118 };
+static const unsigned int sdc2_cmd_pins[] = { 119 };
+static const unsigned int sdc2_data_pins[] = { 120 };
+
+enum sm6115_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer,
+ msm_mux_cri_trng,
+ msm_mux_dac_calib,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gsm0_tx,
+ msm_mux_gsm1_tx,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync_out_0,
+ msm_mux_mdp_vsync_out_1,
+ msm_mux_mpm_pwr,
+ msm_mux_mss_lte,
+ msm_mux_m_voc,
+ msm_mux_nav_gpio,
+ msm_mux_pa_indicator,
+ msm_mux_pbs,
+ msm_mux_pbs_out,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup2,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sd_write,
+ msm_mux_ssbi_wtr1,
+ msm_mux_tgu,
+ msm_mux_tsense_pwm,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux__,
+};
+
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
+};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio22", "gpio23", "gpio24",
+ "gpio25", "gpio26", "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+ "gpio35", "gpio36", "gpio43", "gpio44", "gpio45", "gpio63", "gpio64",
+ "gpio102", "gpio103", "gpio104", "gpio105",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio8", "gpio9", "gpio10",
+ "gpio11", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25", "gpio26",
+ "gpio47", "gpio48", "gpio69", "gpio70", "gpio87", "gpio90", "gpio91",
+ "gpio94", "gpio95", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio109", "gpio110",
+};
+static const char * const atest_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio29", "gpio30",
+ "gpio31", "gpio32", "gpio33", "gpio86", "gpio87", "gpio88", "gpio89",
+ "gpio100", "gpio101",
+};
+static const char * const mpm_pwr_groups[] = {
+ "gpio1",
+};
+static const char * const m_voc_groups[] = {
+ "gpio0",
+};
+static const char * const dac_calib_groups[] = {
+ "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio22", "gpio23", "gpio24", "gpio25", "gpio26",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio80", "gpio81",
+ "gpio82", "gpio102", "gpio103", "gpio104", "gpio105"
+};
+static const char * const qup1_groups[] = {
+ "gpio4", "gpio5", "gpio69", "gpio70",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio4", "gpio5", "gpio18",
+};
+static const char * const qup2_groups[] = {
+ "gpio6", "gpio7", "gpio71", "gpio80",
+};
+static const char * const qup3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const pbs_out_groups[] = {
+ "gpio8", "gpio9", "gpio52",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio8", "gpio9",
+};
+static const char * const tsense_pwm_groups[] = {
+ "gpio8",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const pbs_groups[] = {
+ "gpio10", "gpio11", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio47", "gpio48", "gpio87",
+ "gpio90", "gpio91",
+};
+static const char * const qup4_groups[] = {
+ "gpio12", "gpio13", "gpio96", "gpio97",
+};
+static const char * const tgu_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const qup5_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio18",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio19",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio20", "gpio21", "gpio27", "gpio28",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio21",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio22", "gpio23", "gpio29", "gpio30",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const cci_timer_groups[] = {
+ "gpio24", "gpio25", "gpio28", "gpio32",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio24", "gpio86",
+};
+static const char * const cci_async_groups[] = {
+ "gpio25",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio26",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio27", "gpio28", "gpio72", "gpio73", "gpio96", "gpio97",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio31", "gpio95",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio32", "gpio96",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio33", "gpio97",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio42", "gpio47", "gpio52", "gpio95", "gpio96", "gpio97", "gpio106",
+ "gpio107", "gpio108",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio48",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio49",
+};
+static const char * const gsm1_tx_groups[] = {
+ "gpio53",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio59", "gpio60",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio62",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio63",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio63", "gpio64",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio64",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio69", "gpio107",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio69", "gpio70",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio70", "gpio106",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio71",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio72",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio73",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio74",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio75",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio76",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio77",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio78",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio79",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio81", "gpio96", "gpio97",
+};
+static const char * const mdp_vsync_out_0_groups[] = {
+ "gpio81",
+};
+static const char * const mdp_vsync_out_1_groups[] = {
+ "gpio81",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio89",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio90", "gpio91",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio94",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio95",
+};
+static const char * const sd_write_groups[] = {
+ "gpio96",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio96", "gpio97",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio102", "gpio103",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio104", "gpio105",
+};
+
+static const struct msm_function sm6115_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer),
+ FUNCTION(cri_trng),
+ FUNCTION(dac_calib),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gsm0_tx),
+ FUNCTION(gsm1_tx),
+ FUNCTION(jitter_bist),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync_out_0),
+ FUNCTION(mdp_vsync_out_1),
+ FUNCTION(mpm_pwr),
+ FUNCTION(mss_lte),
+ FUNCTION(m_voc),
+ FUNCTION(nav_gpio),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs),
+ FUNCTION(pbs_out),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup2),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sd_write),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(tgu),
+ FUNCTION(tsense_pwm),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+};
+
+/*
+ * Every pin is maintained as a single group; any missing or non-existent pin
+ * is maintained as a dummy group to keep the pin group index in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6115_groups[] = {
+ [0] = PINGROUP(0, WEST, qup0, m_voc, ddr_bist, _, phase_flag, qdss_gpio, atest, _, _),
+ [1] = PINGROUP(1, WEST, qup0, mpm_pwr, ddr_bist, _, phase_flag, qdss_gpio, atest, _, _),
+ [2] = PINGROUP(2, WEST, qup0, ddr_bist, _, phase_flag, qdss_gpio, dac_calib, atest, _, _),
+ [3] = PINGROUP(3, WEST, qup0, ddr_bist, _, phase_flag, qdss_gpio, dac_calib, atest, _, _),
+ [4] = PINGROUP(4, WEST, qup1, cri_trng, _, phase_flag, dac_calib, atest, _, _, _),
+ [5] = PINGROUP(5, WEST, qup1, cri_trng, _, phase_flag, dac_calib, atest, _, _, _),
+ [6] = PINGROUP(6, WEST, qup2, _, phase_flag, dac_calib, atest, _, _, _, _),
+ [7] = PINGROUP(7, WEST, qup2, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, EAST, qup3, pbs_out, pll_bist, _, qdss_gpio, _, tsense_pwm, _, _),
+ [9] = PINGROUP(9, EAST, qup3, pbs_out, pll_bist, _, qdss_gpio, _, _, _, _),
+ [10] = PINGROUP(10, EAST, qup3, agera_pll, _, pbs, qdss_gpio, _, _, _, _),
+ [11] = PINGROUP(11, EAST, qup3, agera_pll, _, pbs, qdss_gpio, _, _, _, _),
+ [12] = PINGROUP(12, WEST, qup4, tgu, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, WEST, qup4, tgu, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, WEST, qup5, tgu, _, phase_flag, qdss_gpio, dac_calib, _, _, _),
+ [15] = PINGROUP(15, WEST, qup5, tgu, _, phase_flag, qdss_gpio, dac_calib, _, _, _),
+ [16] = PINGROUP(16, WEST, qup5, _, phase_flag, qdss_gpio, dac_calib, _, _, _, _),
+ [17] = PINGROUP(17, WEST, qup5, _, phase_flag, qdss_gpio, dac_calib, _, _, _, _),
+ [18] = PINGROUP(18, EAST, sdc2_tb, cri_trng, pbs, qdss_gpio, _, _, _, _, _),
+ [19] = PINGROUP(19, EAST, sdc1_tb, pbs, qdss_gpio, _, _, _, _, _, _),
+ [20] = PINGROUP(20, EAST, cam_mclk, pbs, qdss_gpio, _, _, _, _, _, _),
+ [21] = PINGROUP(21, EAST, cam_mclk, adsp_ext, pbs, qdss_gpio, _, _, _, _, _),
+ [22] = PINGROUP(22, EAST, cci_i2c, prng_rosc, _, pbs, phase_flag, qdss_gpio, dac_calib, atest, _),
+ [23] = PINGROUP(23, EAST, cci_i2c, prng_rosc, _, pbs, phase_flag, qdss_gpio, dac_calib, atest, _),
+ [24] = PINGROUP(24, EAST, cci_timer, gcc_gp1, _, pbs, phase_flag, qdss_gpio, dac_calib, atest, _),
+ [25] = PINGROUP(25, EAST, cci_async, cci_timer, _, pbs, phase_flag, qdss_gpio, dac_calib, atest, _),
+ [26] = PINGROUP(26, EAST, _, pbs, phase_flag, qdss_gpio, dac_calib, atest, vsense_trigger, _, _),
+ [27] = PINGROUP(27, EAST, cam_mclk, qdss_cti, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, EAST, cam_mclk, cci_timer, qdss_cti, _, _, _, _, _, _),
+ [29] = PINGROUP(29, EAST, cci_i2c, _, phase_flag, dac_calib, atest, _, _, _, _),
+ [30] = PINGROUP(30, EAST, cci_i2c, _, phase_flag, dac_calib, atest, _, _, _, _),
+ [31] = PINGROUP(31, EAST, gp_pdm0, _, phase_flag, dac_calib, atest, _, _, _, _),
+ [32] = PINGROUP(32, EAST, cci_timer, gp_pdm1, _, phase_flag, dac_calib, atest, _, _, _),
+ [33] = PINGROUP(33, EAST, gp_pdm2, _, phase_flag, dac_calib, atest, _, _, _, _),
+ [34] = PINGROUP(34, EAST, _, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, EAST, _, phase_flag, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, EAST, _, phase_flag, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, EAST, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, EAST, _, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, EAST, _, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, EAST, _, _, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, EAST, _, _, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, EAST, _, nav_gpio, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, EAST, _, _, phase_flag, _, _, _, _, _, _),
+ [44] = PINGROUP(44, EAST, _, _, phase_flag, _, _, _, _, _, _),
+ [45] = PINGROUP(45, EAST, _, _, phase_flag, _, _, _, _, _, _),
+ [46] = PINGROUP(46, EAST, _, _, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, EAST, _, nav_gpio, pbs, qdss_gpio, _, _, _, _, _),
+ [48] = PINGROUP(48, EAST, _, vfr_1, _, pbs, qdss_gpio, _, _, _, _),
+ [49] = PINGROUP(49, EAST, _, pa_indicator, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, EAST, _, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, EAST, _, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, EAST, _, nav_gpio, pbs_out, _, _, _, _, _, _),
+ [53] = PINGROUP(53, EAST, _, gsm1_tx, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, EAST, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, EAST, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, EAST, _, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, EAST, _, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, EAST, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, EAST, _, ssbi_wtr1, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, EAST, _, ssbi_wtr1, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, EAST, _, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, EAST, _, pll_bypassnl, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, EAST, pll_reset, _, phase_flag, ddr_pxi0, _, _, _, _, _),
+ [64] = PINGROUP(64, EAST, gsm0_tx, _, phase_flag, ddr_pxi0, _, _, _, _, _),
+ [65] = PINGROUP(65, WEST, _, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, WEST, _, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, WEST, _, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, WEST, _, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, WEST, qup1, gcc_gp2, qdss_gpio, ddr_pxi1, _, _, _, _, _),
+ [70] = PINGROUP(70, WEST, qup1, gcc_gp3, qdss_gpio, ddr_pxi1, _, _, _, _, _),
+ [71] = PINGROUP(71, WEST, qup2, dbg_out, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, SOUTH, uim2_data, qdss_cti, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, SOUTH, uim2_clk, _, qdss_cti, _, _, _, _, _, _),
+ [74] = PINGROUP(74, SOUTH, uim2_reset, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, SOUTH, uim2_present, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, SOUTH, uim1_data, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, SOUTH, uim1_clk, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, SOUTH, uim1_reset, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, SOUTH, uim1_present, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, WEST, qup2, dac_calib, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, WEST, mdp_vsync_out_0, mdp_vsync_out_1, mdp_vsync, dac_calib, _, _, _, _, _),
+ [82] = PINGROUP(82, WEST, qup0, dac_calib, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, WEST, _, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, WEST, _, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, WEST, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, WEST, qup0, gcc_gp1, atest, _, _, _, _, _, _),
+ [87] = PINGROUP(87, EAST, pbs, qdss_gpio, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, EAST, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, WEST, usb_phy, atest, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, EAST, mss_lte, pbs, qdss_gpio, _, _, _, _, _, _),
+ [91] = PINGROUP(91, EAST, mss_lte, pbs, qdss_gpio, _, _, _, _, _, _),
+ [92] = PINGROUP(92, WEST, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, WEST, _, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, WEST, _, qdss_gpio, wlan1_adc0, _, _, _, _, _, _),
+ [95] = PINGROUP(95, WEST, nav_gpio, gp_pdm0, qdss_gpio, wlan1_adc1, _, _, _, _, _),
+ [96] = PINGROUP(96, WEST, qup4, nav_gpio, mdp_vsync, gp_pdm1, sd_write, jitter_bist, qdss_cti, qdss_cti, _),
+ [97] = PINGROUP(97, WEST, qup4, nav_gpio, mdp_vsync, gp_pdm2, jitter_bist, qdss_cti, qdss_cti, _, _),
+ [98] = PINGROUP(98, SOUTH, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, SOUTH, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, SOUTH, atest, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, SOUTH, atest, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, SOUTH, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
+ [103] = PINGROUP(103, SOUTH, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
+ [104] = PINGROUP(104, SOUTH, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, _),
+ [105] = PINGROUP(105, SOUTH, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, _),
+ [106] = PINGROUP(106, SOUTH, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
+ [107] = PINGROUP(107, SOUTH, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
+ [108] = PINGROUP(108, SOUTH, nav_gpio, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, SOUTH, _, qdss_gpio, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, SOUTH, _, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
+ [113] = UFS_RESET(ufs_reset, 0x78000),
+ [114] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x75000, 15, 0),
+ [115] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x75000, 13, 6),
+ [116] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x75000, 11, 3),
+ [117] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x75000, 9, 0),
+ [118] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x73000, 14, 6),
+ [119] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x73000, 11, 3),
+ [120] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x73000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sm6115_tlmm = {
+ .pins = sm6115_pins,
+ .npins = ARRAY_SIZE(sm6115_pins),
+ .functions = sm6115_functions,
+ .nfunctions = ARRAY_SIZE(sm6115_functions),
+ .groups = sm6115_groups,
+ .ngroups = ARRAY_SIZE(sm6115_groups),
+ .ngpios = 114,
+ .tiles = sm6115_tiles,
+ .ntiles = ARRAY_SIZE(sm6115_tiles),
+};
+
+static int sm6115_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6115_tlmm);
+}
+
+static const struct of_device_id sm6115_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6115-tlmm", },
+ { }
+};
+
+static struct platform_driver sm6115_tlmm_driver = {
+ .driver = {
+ .name = "sm6115-tlmm",
+ .of_match_table = sm6115_tlmm_of_match,
+ },
+ .probe = sm6115_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm6115_tlmm_init(void)
+{
+ return platform_driver_register(&sm6115_tlmm_driver);
+}
+arch_initcall(sm6115_tlmm_init);
+
+static void __exit sm6115_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6115_tlmm_driver);
+}
+module_exit(sm6115_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI sm6115 tlmm driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm6115_tlmm_of_match);
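Unlike mdm9607's single register block, this TLMM is split across the three tiles listed in sm6115_tiles[], and each pingroup carries a .tile index that the msm core resolves against the matching "reg" region. A rough sketch of that addressing, assuming 'bases' stands in for the core's ioremapped tile bases (the real core keeps them in its private state):

#include <linux/io.h>

#include "pinctrl-msm.h"

/*
 * Illustrative only: 'bases' stands in for the three ioremapped regions
 * corresponding to sm6115_tiles[] ("south", "east", "west"); 'g' is one
 * entry of sm6115_groups[]. Register offsets are relative to the tile.
 */
static u32 example_read_ctl(void __iomem *bases[], const struct msm_pingroup *g)
{
        return readl(bases[g->tile] + g->ctl_reg);
}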
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index a89d24a040af..98bf0e2a2a8d 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1104,39 +1104,42 @@ static int pmic_gpio_remove(struct platform_device *pdev)
}

static const struct of_device_id pmic_gpio_of_match[] = {
- { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
- /* pm8950 has 8 GPIOs with holes on 3 */
- { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
- { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
- { .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
- { .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
- { .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
- { .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
- { .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
- /* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
- { .compatible = "qcom,pms405-gpio", .data = (void *) 12 },
/* pm660 has 13 GPIOs with holes on 1, 5, 6, 7, 8 and 10 */
{ .compatible = "qcom,pm660-gpio", .data = (void *) 13 },
/* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */
{ .compatible = "qcom,pm660l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pmc8180-gpio", .data = (void *) 10 },
/* pm8150b has 12 GPIOs with holes on 3, r and 7 */
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pmc8180c-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ /* pm8950 has 8 GPIOs with holes on 3 */
+ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
+ { .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
+ { .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
+ { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
+ { .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
- { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
- { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ /* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
+ { .compatible = "qcom,pms405-gpio", .data = (void *) 12 },
/* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
{ .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
{ },
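The reshuffle above is alphabetical housekeeping only; each entry still encodes the PMIC's GPIO count by casting it to the .data pointer. A minimal sketch of the decode side, assuming the generic device_get_match_data() helper and a hypothetical function name (the driver's probe performs the equivalent cast):

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Sketch: recover the pin count encoded as (void *)N in the match table. */
static int example_npins(struct platform_device *pdev)
{
        int npins = (uintptr_t)device_get_match_data(&pdev->dev);

        return npins ? npins : -ENODEV;
}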
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 4b84a744ae87..9a72999084b3 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -37,6 +37,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77990 if ARCH_R8A77990
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
+ select PINCTRL_RZG2L if ARCH_R9A07G044
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -176,6 +177,16 @@ config PINCTRL_RZA2
help
This selects GPIO and pinctrl driver for Renesas RZ/A2 platforms.

+config PINCTRL_RZG2L
+ bool "pin control support for RZ/G2L" if COMPILE_TEST
+ depends on OF
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects GPIO and pinctrl driver for Renesas RZ/G2L platforms.
+
config PINCTRL_PFC_R8A77470
bool "pin control support for RZ/G1C" if COMPILE_TEST
select PINCTRL_SH_PFC
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 353563228dc2..7d9238a9ef57 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_PFC_SHX3) += pfc-shx3.o
obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
+obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o

ifeq ($(CONFIG_COMPILE_TEST),y)
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 5ccc49b387f1..f2ab02225837 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -571,17 +571,21 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7794_pinmux_info,
},
#endif
-/* Both r8a7795 entries must be present to make sanity checks work */
-#ifdef CONFIG_PINCTRL_PFC_R8A77950
+/*
+ * Both r8a7795 entries must be present to make sanity checks work, but only
+ * the first entry is actually used.
+ * R-Car H3 ES1.x is matched using soc_device_match() instead.
+ */
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
{
.compatible = "renesas,pfc-r8a7795",
- .data = &r8a77950_pinmux_info,
+ .data = &r8a77951_pinmux_info,
},
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A77951
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
{
.compatible = "renesas,pfc-r8a7795",
- .data = &r8a77951_pinmux_info,
+ .data = &r8a77950_pinmux_info,
},
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77960
@@ -1085,26 +1089,20 @@ static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
#ifdef CONFIG_OF
static const void *sh_pfc_quirk_match(void)
{
-#if defined(CONFIG_PINCTRL_PFC_R8A77950) || \
- defined(CONFIG_PINCTRL_PFC_R8A77951)
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
const struct soc_device_attribute *match;
static const struct soc_device_attribute quirks[] = {
{
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &r8a77950_pinmux_info,
},
- {
- .soc_id = "r8a7795",
- .data = &r8a77951_pinmux_info,
- },
-
{ /* sentinel */ }
};

match = soc_device_match(quirks);
if (match)
- return match->data ?: ERR_PTR(-ENODEV);
-#endif /* CONFIG_PINCTRL_PFC_R8A77950 || CONFIG_PINCTRL_PFC_R8A77951 */
+ return match->data;
+#endif /* CONFIG_PINCTRL_PFC_R8A77950 */
return NULL;
}
@@ -1119,9 +1117,6 @@ static int sh_pfc_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
if (pdev->dev.of_node) {
info = sh_pfc_quirk_match();
- if (IS_ERR(info))
- return PTR_ERR(info);
-
if (!info)
info = of_device_get_match_data(&pdev->dev);
} else
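The net effect of these core.c hunks: the DT match table now returns the ES2+ (r8a77951) data directly, and only R-Car H3 ES1.x is overridden through soc_device_match(), so the ERR_PTR(-ENODEV) escape hatch can go. A condensed sketch of the pattern, with the quirk table taken from sh_pfc_quirk_match() above and a helper name of our own:

#include <linux/sys_soc.h>

/* Pick alternative match data only on one silicon revision. */
static const void *example_quirk_match(void)
{
        static const struct soc_device_attribute quirks[] = {
                { .soc_id = "r8a7795", .revision = "ES1.*" },
                { /* sentinel */ }
        };

        /* Non-NULL only on H3 ES1.x; callers fall back to the DT match. */
        return soc_device_match(quirks);
}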
diff --git a/drivers/pinctrl/renesas/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c
index b479f87a3b23..c56e1e4c13b3 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77995.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77995.c
@@ -14,16 +14,27 @@
#include <linux/errno.h>
#include <linux/kernel.h>

+#include "core.h"
#include "sh_pfc.h"

-#define CPU_ALL_GP(fn, sfx) \
- PORT_GP_9(0, fn, sfx), \
- PORT_GP_32(1, fn, sfx), \
- PORT_GP_32(2, fn, sfx), \
- PORT_GP_CFG_10(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_32(4, fn, sfx), \
- PORT_GP_21(5, fn, sfx), \
- PORT_GP_14(6, fn, sfx)
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_9(0, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_32(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_32(2, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_10(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_32(4, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_21(5, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PORT_GP_CFG_14(6, fn, sfx, SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(DU_DOTCLKIN0, "DU_DOTCLKIN0", fn, SH_PFC_PIN_CFG_PULL_DOWN), \
+ PIN_NOGP_CFG(FSCLKST_N, "FSCLKST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN), \
+ PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP)
/*
* F_() : just information
@@ -930,8 +941,17 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP13_7_4, TPU0TO3_A),
};
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+ PINMUX_NOGP_ALL(),
};

/* - AUDIO CLOCK ------------------------------------------------------------- */
@@ -2834,6 +2854,214 @@ static int r8a77995_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *po
return bit;
}

+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = RCAR_GP_PIN(1, 9), /* DU_DG1 */
+ [ 1] = RCAR_GP_PIN(1, 8), /* DU_DG0 */
+ [ 2] = RCAR_GP_PIN(1, 7), /* DU_DB7 */
+ [ 3] = RCAR_GP_PIN(1, 6), /* DU_DB6 */
+ [ 4] = RCAR_GP_PIN(1, 5), /* DU_DB5 */
+ [ 5] = RCAR_GP_PIN(1, 4), /* DU_DB4 */
+ [ 6] = RCAR_GP_PIN(1, 3), /* DU_DB3 */
+ [ 7] = RCAR_GP_PIN(1, 2), /* DU_DB2 */
+ [ 8] = RCAR_GP_PIN(1, 1), /* DU_DB1 */
+ [ 9] = RCAR_GP_PIN(1, 0), /* DU_DB0 */
+ [10] = PIN_MLB_REF, /* MLB_REF */
+ [11] = RCAR_GP_PIN(0, 8), /* MLB_SIG */
+ [12] = RCAR_GP_PIN(0, 7), /* MLB_DAT */
+ [13] = RCAR_GP_PIN(0, 6), /* MLB_CLK */
+ [14] = RCAR_GP_PIN(0, 5), /* MSIOF2_RXD */
+ [15] = RCAR_GP_PIN(0, 4), /* MSIOF2_TXD */
+ [16] = RCAR_GP_PIN(0, 3), /* MSIOF2_SCK */
+ [17] = RCAR_GP_PIN(0, 2), /* IRQ0_A */
+ [18] = RCAR_GP_PIN(0, 1), /* USB0_OVC */
+ [19] = RCAR_GP_PIN(0, 0), /* USB0_PWEN */
+ [20] = PIN_PRESETOUT_N, /* PRESETOUT# */
+ [21] = PIN_DU_DOTCLKIN0, /* DU_DOTCLKIN0 */
+ [22] = PIN_FSCLKST_N, /* FSCLKST# */
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = PIN_TDI, /* TDI */
+ [29] = PIN_TMS, /* TMS */
+ [30] = PIN_TCK, /* TCK */
+ [31] = PIN_TRST_N, /* TRST# */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 9), /* VI4_DATA8 */
+ [ 1] = RCAR_GP_PIN(2, 8), /* VI4_DATA7 */
+ [ 2] = RCAR_GP_PIN(2, 7), /* VI4_DATA6 */
+ [ 3] = RCAR_GP_PIN(2, 6), /* VI4_DATA5 */
+ [ 4] = RCAR_GP_PIN(2, 5), /* VI4_DATA4 */
+ [ 5] = RCAR_GP_PIN(2, 4), /* VI4_DATA3 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* VI4_DATA2 */
+ [ 7] = RCAR_GP_PIN(2, 2), /* VI4_DATA1 */
+ [ 8] = RCAR_GP_PIN(2, 1), /* VI4_DATA0 */
+ [ 9] = RCAR_GP_PIN(2, 0), /* VI4_CLK */
+ [10] = RCAR_GP_PIN(1, 31), /* QPOLB */
+ [11] = RCAR_GP_PIN(1, 30), /* QPOLA */
+ [12] = RCAR_GP_PIN(1, 29), /* DU_CDE */
+ [13] = RCAR_GP_PIN(1, 28), /* DU_DISP/CDE */
+ [14] = RCAR_GP_PIN(1, 27), /* DU_DISP */
+ [15] = RCAR_GP_PIN(1, 26), /* DU_VSYNC */
+ [16] = RCAR_GP_PIN(1, 25), /* DU_HSYNC */
+ [17] = RCAR_GP_PIN(1, 24), /* DU_DOTCLKOUT0 */
+ [18] = RCAR_GP_PIN(1, 23), /* DU_DR7 */
+ [19] = RCAR_GP_PIN(1, 22), /* DU_DR6 */
+ [20] = RCAR_GP_PIN(1, 21), /* DU_DR5 */
+ [21] = RCAR_GP_PIN(1, 20), /* DU_DR4 */
+ [22] = RCAR_GP_PIN(1, 19), /* DU_DR3 */
+ [23] = RCAR_GP_PIN(1, 18), /* DU_DR2 */
+ [24] = RCAR_GP_PIN(1, 17), /* DU_DR1 */
+ [25] = RCAR_GP_PIN(1, 16), /* DU_DR0 */
+ [26] = RCAR_GP_PIN(1, 15), /* DU_DG7 */
+ [27] = RCAR_GP_PIN(1, 14), /* DU_DG6 */
+ [28] = RCAR_GP_PIN(1, 13), /* DU_DG5 */
+ [29] = RCAR_GP_PIN(1, 12), /* DU_DG4 */
+ [30] = RCAR_GP_PIN(1, 11), /* DU_DG3 */
+ [31] = RCAR_GP_PIN(1, 10), /* DU_DG2 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = RCAR_GP_PIN(3, 8), /* NFDATA6 */
+ [ 1] = RCAR_GP_PIN(3, 7), /* NFDATA5 */
+ [ 2] = RCAR_GP_PIN(3, 6), /* NFDATA4 */
+ [ 3] = RCAR_GP_PIN(3, 5), /* NFDATA3 */
+ [ 4] = RCAR_GP_PIN(3, 4), /* NFDATA2 */
+ [ 5] = RCAR_GP_PIN(3, 3), /* NFDATA1 */
+ [ 6] = RCAR_GP_PIN(3, 2), /* NFDATA0 */
+ [ 7] = RCAR_GP_PIN(3, 1), /* NFWE# (PUEN) / NFRE# (PUD) */
+ [ 8] = RCAR_GP_PIN(3, 0), /* NFRE# (PUEN) / NFWE# (PUD) */
+ [ 9] = RCAR_GP_PIN(4, 0), /* NFRB# */
+ [10] = RCAR_GP_PIN(2, 31), /* NFCE# */
+ [11] = RCAR_GP_PIN(2, 30), /* NFCLE */
+ [12] = RCAR_GP_PIN(2, 29), /* NFALE */
+ [13] = RCAR_GP_PIN(2, 28), /* VI4_CLKENB */
+ [14] = RCAR_GP_PIN(2, 27), /* VI4_FIELD */
+ [15] = RCAR_GP_PIN(2, 26), /* VI4_HSYNC# */
+ [16] = RCAR_GP_PIN(2, 25), /* VI4_VSYNC# */
+ [17] = RCAR_GP_PIN(2, 24), /* VI4_DATA23 */
+ [18] = RCAR_GP_PIN(2, 23), /* VI4_DATA22 */
+ [19] = RCAR_GP_PIN(2, 22), /* VI4_DATA21 */
+ [20] = RCAR_GP_PIN(2, 21), /* VI4_DATA20 */
+ [21] = RCAR_GP_PIN(2, 20), /* VI4_DATA19 */
+ [22] = RCAR_GP_PIN(2, 19), /* VI4_DATA18 */
+ [23] = RCAR_GP_PIN(2, 18), /* VI4_DATA17 */
+ [24] = RCAR_GP_PIN(2, 17), /* VI4_DATA16 */
+ [25] = RCAR_GP_PIN(2, 16), /* VI4_DATA15 */
+ [26] = RCAR_GP_PIN(2, 15), /* VI4_DATA14 */
+ [27] = RCAR_GP_PIN(2, 14), /* VI4_DATA13 */
+ [28] = RCAR_GP_PIN(2, 13), /* VI4_DATA12 */
+ [29] = RCAR_GP_PIN(2, 12), /* VI4_DATA11 */
+ [30] = RCAR_GP_PIN(2, 11), /* VI4_DATA10 */
+ [31] = RCAR_GP_PIN(2, 10), /* VI4_DATA9 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = RCAR_GP_PIN(4, 31), /* CAN0_RX_A */
+ [ 1] = RCAR_GP_PIN(5, 2), /* CAN_CLK */
+ [ 2] = RCAR_GP_PIN(5, 1), /* TPU0TO1_A */
+ [ 3] = RCAR_GP_PIN(5, 0), /* TPU0TO0_A */
+ [ 4] = RCAR_GP_PIN(4, 27), /* TX2 */
+ [ 5] = RCAR_GP_PIN(4, 26), /* RX2 */
+ [ 6] = RCAR_GP_PIN(4, 25), /* SCK2 */
+ [ 7] = RCAR_GP_PIN(4, 24), /* TX1_A */
+ [ 8] = RCAR_GP_PIN(4, 23), /* RX1_A */
+ [ 9] = RCAR_GP_PIN(4, 22), /* SCK1_A */
+ [10] = RCAR_GP_PIN(4, 21), /* TX0_A */
+ [11] = RCAR_GP_PIN(4, 20), /* RX0_A */
+ [12] = RCAR_GP_PIN(4, 19), /* SCK0_A */
+ [13] = RCAR_GP_PIN(4, 18), /* MSIOF1_RXD */
+ [14] = RCAR_GP_PIN(4, 17), /* MSIOF1_TXD */
+ [15] = RCAR_GP_PIN(4, 16), /* MSIOF1_SCK */
+ [16] = RCAR_GP_PIN(4, 15), /* MSIOF0_RXD */
+ [17] = RCAR_GP_PIN(4, 14), /* MSIOF0_TXD */
+ [18] = RCAR_GP_PIN(4, 13), /* MSIOF0_SYNC */
+ [19] = RCAR_GP_PIN(4, 12), /* MSIOF0_SCK */
+ [20] = RCAR_GP_PIN(4, 11), /* SDA1 */
+ [21] = RCAR_GP_PIN(4, 10), /* SCL1 */
+ [22] = RCAR_GP_PIN(4, 9), /* SDA0 */
+ [23] = RCAR_GP_PIN(4, 8), /* SCL0 */
+ [24] = RCAR_GP_PIN(4, 7), /* SSI_WS4_A */
+ [25] = RCAR_GP_PIN(4, 6), /* SSI_SDATA4_A */
+ [26] = RCAR_GP_PIN(4, 5), /* SSI_SCK4_A */
+ [27] = RCAR_GP_PIN(4, 4), /* SSI_WS34 */
+ [28] = RCAR_GP_PIN(4, 3), /* SSI_SDATA3 */
+ [29] = RCAR_GP_PIN(4, 2), /* SSI_SCK34 */
+ [30] = RCAR_GP_PIN(4, 1), /* AUDIO_CLKA */
+ [31] = RCAR_GP_PIN(3, 9), /* NFDATA7 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(6, 10), /* QSPI1_IO3 */
+ [ 1] = RCAR_GP_PIN(6, 9), /* QSPI1_IO2 */
+ [ 2] = RCAR_GP_PIN(6, 8), /* QSPI1_MISO_IO1 */
+ [ 3] = RCAR_GP_PIN(6, 7), /* QSPI1_MOSI_IO0 */
+ [ 4] = RCAR_GP_PIN(6, 6), /* QSPI1_SPCLK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* QSPI0_SSL */
+ [ 6] = RCAR_GP_PIN(6, 4), /* QSPI0_IO3 */
+ [ 7] = RCAR_GP_PIN(6, 3), /* QSPI0_IO2 */
+ [ 8] = RCAR_GP_PIN(6, 2), /* QSPI0_MISO_IO1 */
+ [ 9] = RCAR_GP_PIN(6, 1), /* QSPI0_MOSI_IO0 */
+ [10] = RCAR_GP_PIN(6, 0), /* QSPI0_SPCLK */
+ [11] = RCAR_GP_PIN(5, 20), /* AVB0_LINK */
+ [12] = RCAR_GP_PIN(5, 19), /* AVB0_PHY_INT */
+ [13] = RCAR_GP_PIN(5, 18), /* AVB0_MAGIC */
+ [14] = RCAR_GP_PIN(5, 17), /* AVB0_MDC */
+ [15] = RCAR_GP_PIN(5, 16), /* AVB0_MDIO */
+ [16] = RCAR_GP_PIN(5, 15), /* AVB0_TXCREFCLK */
+ [17] = RCAR_GP_PIN(5, 14), /* AVB0_TD3 */
+ [18] = RCAR_GP_PIN(5, 13), /* AVB0_TD2 */
+ [19] = RCAR_GP_PIN(5, 12), /* AVB0_TD1 */
+ [20] = RCAR_GP_PIN(5, 11), /* AVB0_TD0 */
+ [21] = RCAR_GP_PIN(5, 10), /* AVB0_TXC */
+ [22] = RCAR_GP_PIN(5, 9), /* AVB0_TX_CTL */
+ [23] = RCAR_GP_PIN(5, 8), /* AVB0_RD3 */
+ [24] = RCAR_GP_PIN(5, 7), /* AVB0_RD2 */
+ [25] = RCAR_GP_PIN(5, 6), /* AVB0_RD1 */
+ [26] = RCAR_GP_PIN(5, 5), /* AVB0_RD0 */
+ [27] = RCAR_GP_PIN(5, 4), /* AVB0_RXC */
+ [28] = RCAR_GP_PIN(5, 3), /* AVB0_RX_CTL */
+ [29] = RCAR_GP_PIN(4, 30), /* CAN1_TX_A */
+ [30] = RCAR_GP_PIN(4, 29), /* CAN1_RX_A */
+ [31] = RCAR_GP_PIN(4, 28), /* CAN0_TX_A */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD4", 0xe6060454) {
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = SH_PFC_PIN_NONE,
+ [ 2] = SH_PFC_PIN_NONE,
+ [ 3] = SH_PFC_PIN_NONE,
+ [ 4] = SH_PFC_PIN_NONE,
+ [ 5] = SH_PFC_PIN_NONE,
+ [ 6] = SH_PFC_PIN_NONE,
+ [ 7] = SH_PFC_PIN_NONE,
+ [ 8] = SH_PFC_PIN_NONE,
+ [ 9] = SH_PFC_PIN_NONE,
+ [10] = SH_PFC_PIN_NONE,
+ [11] = SH_PFC_PIN_NONE,
+ [12] = SH_PFC_PIN_NONE,
+ [13] = SH_PFC_PIN_NONE,
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = RCAR_GP_PIN(6, 13), /* RPC_INT# */
+ [30] = RCAR_GP_PIN(6, 12), /* RPC_RESET# */
+ [31] = RCAR_GP_PIN(6, 11), /* QSPI1_SSL */
+ } },
+ { /* sentinel */ }
+};
+
enum ioctrl_regs {
TDSELCTRL,
};
@@ -2843,8 +3071,83 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
+static const struct pinmux_bias_reg *
+r8a77995_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *puen_bit, unsigned int *pud_bit)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
+
+ reg = rcar_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return reg;
+
+ *puen_bit = bit;
+
+ /* NFWE# and NFRE# use different bit positions in PUD2 than in PUEN2 */
+ switch (pin) {
+ case RCAR_GP_PIN(3, 0): /* NFRE# */
+ *pud_bit = 7;
+ break;
+
+ case RCAR_GP_PIN(3, 1): /* NFWE# */
+ *pud_bit = 8;
+ break;
+
+ default:
+ *pud_bit = bit;
+ break;
+ }
+
+ return reg;
+}
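+
+/*
+ * E.g. for NFRE# (GP3_0) this returns the PUEN bit from the common
+ * lookup but forces pud_bit to 7, so the pull enable and the pull
+ * direction are controlled by different bit positions in the register
+ * pair.
+ */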
+
+static unsigned int r8a77995_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int puen_bit, pud_bit;
+
+ reg = r8a77995_pin_to_bias_reg(pfc, pin, &puen_bit, &pud_bit);
+ if (!reg)
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(puen_bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(pud_bit))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+static void r8a77995_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int puen_bit, pud_bit;
+ u32 enable, updown;
+
+ reg = r8a77995_pin_to_bias_reg(pfc, pin, &puen_bit, &pud_bit);
+ if (!reg)
+ return;
+
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(puen_bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE) {
+ enable |= BIT(puen_bit);
+
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(pud_bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(pud_bit);
+
+ sh_pfc_write(pfc, reg->pud, updown);
+ }
+ sh_pfc_write(pfc, reg->puen, enable);
+}
+
static const struct sh_pfc_soc_operations r8a77995_pinmux_ops = {
.pin_to_pocctrl = r8a77995_pin_to_pocctrl,
+ .get_bias = r8a77995_pinmux_get_bias,
+ .set_bias = r8a77995_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a77995_pinmux_info = {
@@ -2862,6 +3165,7 @@ const struct sh_pfc_soc_info r8a77995_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
new file mode 100644
index 000000000000..dbf2f521bb27
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -0,0 +1,1175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L Pin Control and GPIO driver core
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzg2l"
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 16 higher bits [31:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+#define MUX_FUNC_OFFS 16
+#define MUX_FUNC(pinconf) (((pinconf) & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
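+
+/*
+ * For illustration (the cell value here is made up, not taken from a
+ * real board file): a "pinmux" cell of 0x00010029 selects pin
+ * identifier 0x0029 with mux function 1, since
+ *
+ *   0x00010029 & MUX_PIN_ID_MASK == 0x0029   (pin)
+ *   MUX_FUNC(0x00010029)         == 1        (PSEL function)
+ */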
+
+/* PIN capabilities */
+#define PIN_CFG_IOLH BIT(0)
+#define PIN_CFG_SR BIT(1)
+#define PIN_CFG_IEN BIT(2)
+#define PIN_CFG_PUPD BIT(3)
+#define PIN_CFG_IOLH_SD0 BIT(4)
+#define PIN_CFG_IOLH_SD1 BIT(5)
+#define PIN_CFG_IOLH_QSPI BIT(6)
+#define PIN_CFG_IOLH_ETH0 BIT(7)
+#define PIN_CFG_IOLH_ETH1 BIT(8)
+#define PIN_CFG_FILONOFF BIT(9)
+#define PIN_CFG_FILNUM BIT(10)
+#define PIN_CFG_FILCLKSEL BIT(11)
+
+#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH | \
+ PIN_CFG_SR | \
+ PIN_CFG_PUPD | \
+ PIN_CFG_FILONOFF | \
+ PIN_CFG_FILNUM | \
+ PIN_CFG_FILCLKSEL)
+
+#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \
+ PIN_CFG_FILONOFF | \
+ PIN_CFG_FILNUM | \
+ PIN_CFG_FILCLKSEL)
+
+/*
+ * n indicates the number of pins in the port, a is the register index
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZG2L_GPIO_PORT_PACK(n, a, f) (((n) << 28) | ((a) << 20) | (f))
+#define RZG2L_GPIO_PORT_GET_PINCNT(x) (((x) & GENMASK(30, 28)) >> 28)
+#define RZG2L_GPIO_PORT_GET_INDEX(x) (((x) & GENMASK(26, 20)) >> 20)
+#define RZG2L_GPIO_PORT_GET_CFGS(x) ((x) & GENMASK(19, 0))
+
+/*
+ * BIT(31) indicates a dedicated pin, p is the register index used when
+ * referencing the SR/IEN/IOLH/FILxx registers, b is the pin's bit
+ * position within the register (each field is accessed at bit b * 8)
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZG2L_SINGLE_PIN BIT(31)
+#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
+ ((p) << 24) | ((b) << 20) | (f))
+#define RZG2L_SINGLE_PIN_GET_PORT(x) (((x) & GENMASK(30, 24)) >> 24)
+#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
+#define RZG2L_SINGLE_PIN_GET_CFGS(x) ((x) & GENMASK(19, 0))
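+
+/*
+ * As a quick round-trip check (flags trimmed for brevity, values
+ * illustrative): cfg = RZG2L_SINGLE_PIN_PACK(0x6, 1, PIN_CFG_IEN)
+ * yields a value for which
+ *
+ *   RZG2L_SINGLE_PIN_GET_PORT(cfg) == 0x6
+ *   RZG2L_SINGLE_PIN_GET_BIT(cfg)  == 1
+ *   RZG2L_SINGLE_PIN_GET_CFGS(cfg) == PIN_CFG_IEN
+ */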
+
+#define P(n) (0x0000 + 0x10 + (n))
+#define PM(n) (0x0100 + 0x20 + (n) * 2)
+#define PMC(n) (0x0200 + 0x10 + (n))
+#define PFC(n) (0x0400 + 0x40 + (n) * 4)
+#define PIN(n) (0x0800 + 0x10 + (n))
+#define IEN(n) (0x1800 + (n) * 8)
+#define PWPR (0x3014)
+#define SD_CH(n) (0x3000 + (n) * 4)
+#define QSPI (0x3008)
+
+#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
+#define PVDD_3300 0 /* I/O domain voltage >= 3.3V */
+
+#define PWPR_B0WI BIT(7) /* Bit Write Disable */
+#define PWPR_PFCWE BIT(6) /* PFC Register Write Enable */
+
+#define PM_MASK 0x03
+#define PVDD_MASK 0x01
+#define PFC_MASK 0x07
+#define IEN_MASK 0x01
+
+#define PM_INPUT 0x1
+#define PM_OUTPUT 0x2
+
+#define RZG2L_PIN_ID_TO_PORT(id) ((id) / RZG2L_PINS_PER_PORT)
+#define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT)
+
+struct rzg2l_dedicated_configs {
+ const char *name;
+ u32 config;
+};
+
+struct rzg2l_pinctrl_data {
+ const char * const *port_pins;
+ const u32 *port_pin_configs;
+ struct rzg2l_dedicated_configs *dedicated_pins;
+ unsigned int n_port_pins;
+ unsigned int n_dedicated_pins;
+};
+
+struct rzg2l_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+
+ const struct rzg2l_pinctrl_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+ spinlock_t lock;
+};
+
+static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ reg = readw(pctrl->base + PM(port));
+ reg &= ~(PM_MASK << (pin * 2));
+ writew(reg, pctrl->base + PM(port));
+
+ /* Temporarily switch to GPIO mode with PMC register */
+ reg = readb(pctrl->base + PMC(port));
+ writeb(reg & ~BIT(pin), pctrl->base + PMC(port));
+
+ /* Set the PWPR register to allow writing to the PFC register */
+ writel(0x0, pctrl->base + PWPR); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_PFCWE, pctrl->base + PWPR); /* B0WI=0, PFCWE=1 */
+
+ /* Select Pin function mode with PFC register */
+ reg = readl(pctrl->base + PFC(port));
+ reg &= ~(PFC_MASK << (pin * 4));
+ writel(reg | (func << (pin * 4)), pctrl->base + PFC(port));
+
+ /* Set the PWPR register to be write-protected */
+ writel(0x0, pctrl->base + PWPR); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_B0WI, pctrl->base + PWPR); /* B0WI=1, PFCWE=0 */
+
+ /* Switch to Peripheral pin function with PMC register */
+ reg = readb(pctrl->base + PMC(port));
+ writeb(reg | BIT(pin), pctrl->base + PMC(port));
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
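+
+/*
+ * The PWPR unlock/relock pair above could equally be factored into a
+ * small helper; a minimal sketch (hypothetical name, not part of this
+ * driver):
+ *
+ *   static void rzg2l_pfc_write_enable(struct rzg2l_pinctrl *pctrl, bool en)
+ *   {
+ *           writel(0x0, pctrl->base + PWPR);  // B0WI=0, PFCWE=0
+ *           writel(en ? PWPR_PFCWE : PWPR_B0WI, pctrl->base + PWPR);
+ *   }
+ */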
+
+static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ unsigned int i, *psel_val;
+ struct group_desc *group;
+ int *pins;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->pins;
+
+ for (i = 0; i < group->num_pins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
+ RZG2L_PIN_ID_TO_PORT(pins[i]), RZG2L_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ rzg2l_pinctrl_set_pfc_mode(pctrl, RZG2L_PIN_ID_TO_PORT(pins[i]),
+ RZG2L_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+}
+
+static int rzg2l_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
+ GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
+static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int *pins, *psel_val;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ const char *pin;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or pins and not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux)
+ nmaps += 1;
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzg2l_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = value & MUX_PIN_ID_MASK;
+ psel_val[i] = MUX_FUNC(value);
+ }
+
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+ * Register a single group function where the 'data' is an array of
+ * PSEL register values read from DT.
+ */
+ pin_fn[0] = np->name;
+ fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+ psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = np->name;
+ maps[idx].data.mux.function = np->name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
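+
+/*
+ * For context, a consumer subnode handled by the parser above looks
+ * roughly like this (sketch only; node names and pin/function numbers
+ * are illustrative, not taken from a real board file;
+ * RZG2L_PORT_PINMUX() comes from the dt-bindings header included above):
+ *
+ *   i2c0_pins: i2c0 {
+ *           pinmux = <RZG2L_PORT_PINMUX(46, 2, 1)>,
+ *                    <RZG2L_PORT_PINMUX(46, 3, 1)>;
+ *   };
+ *
+ * Each "pinmux" cell packs the pin identifier into bits [15:0] and the
+ * PSEL value into bits [31:16], matching MUX_PIN_ID_MASK and MUX_FUNC()
+ * above.
+ */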
+
+static void rzg2l_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device_node *child;
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node(np, child) {
+ ret = rzg2l_dt_subnode_to_map(pctldev, child, map,
+ num_maps, &index);
+ if (ret < 0) {
+ of_node_put(child);
+ goto done;
+ }
+ }
+
+ if (*num_maps == 0) {
+ ret = rzg2l_dt_subnode_to_map(pctldev, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ if (ret < 0)
+ rzg2l_dt_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+
+static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *config)
+{
+ struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ unsigned int arg = 0;
+ unsigned long flags;
+ void __iomem *addr;
+ u32 port = 0, reg;
+ u32 cfg = 0;
+ u8 bit = 0;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZG2L_SINGLE_PIN) {
+ port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ }
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (!(cfg & PIN_CFG_IEN))
+ return -EINVAL;
+ spin_lock_irqsave(&pctrl->lock, flags);
+ /* handle _L/_H for 32-bit register read/write */
+ addr = pctrl->base + IEN(port);
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ reg = readl(addr) & (IEN_MASK << (bit * 8));
+ arg = (reg >> (bit * 8)) & 0x1;
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ break;
+
+ case PIN_CONFIG_POWER_SOURCE: {
+ u32 pwr_reg = 0x0;
+
+ if (cfg & PIN_CFG_IOLH_SD0)
+ pwr_reg = SD_CH(0);
+ else if (cfg & PIN_CFG_IOLH_SD1)
+ pwr_reg = SD_CH(1);
+ else if (cfg & PIN_CFG_IOLH_QSPI)
+ pwr_reg = QSPI;
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ addr = pctrl->base + pwr_reg;
+ arg = (readl(addr) & PVDD_MASK) ? 1800 : 3300;
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ break;
+ }
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
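+
+/*
+ * Worked example for the _L/_H handling above (illustrative): a
+ * dedicated pin whose IEN field sits at bit 5 falls in the upper
+ * 32-bit word, so the code reads IEN(port) + 4 and extracts byte
+ * (5 - 4) = 1:
+ *
+ *   addr = pctrl->base + IEN(port) + 4;
+ *   arg  = (readl(addr) >> (1 * 8)) & IEN_MASK;
+ */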
+
+static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *_configs,
+ unsigned int num_configs)
+{
+ struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ enum pin_config_param param;
+ unsigned long flags;
+ void __iomem *addr;
+ u32 port = 0, reg;
+ unsigned int i;
+ u32 cfg = 0;
+ u8 bit = 0;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZG2L_SINGLE_PIN) {
+ port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(_configs[i]);
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE: {
+ unsigned int arg =
+ pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_IEN))
+ return -EINVAL;
+
+ /* handle _L/_H for 32-bit register read/write */
+ addr = pctrl->base + IEN(port);
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(IEN_MASK << (bit * 8));
+ writel(reg | (arg << (bit * 8)), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ break;
+ }
+
+ case PIN_CONFIG_POWER_SOURCE: {
+ unsigned int mV = pinconf_to_config_argument(_configs[i]);
+ u32 pwr_reg = 0x0;
+
+ if (mV != 1800 && mV != 3300)
+ return -EINVAL;
+
+ if (cfg & PIN_CFG_IOLH_SD0)
+ pwr_reg = SD_CH(0);
+ else if (cfg & PIN_CFG_IOLH_SD1)
+ pwr_reg = SD_CH(1);
+ else if (cfg & PIN_CFG_IOLH_QSPI)
+ pwr_reg = QSPI;
+ else
+ return -EINVAL;
+
+ addr = pctrl->base + pwr_reg;
+ spin_lock_irqsave(&pctrl->lock, flags);
+ writel((mV == 1800) ? PVDD_1800 : PVDD_3300, addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rzg2l_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzg2l_pinctrl_pinconf_set(pctldev, pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzg2l_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, prev_config = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzg2l_pinctrl_pinconf_get(pctldev, pins[i], config);
+ if (ret)
+ return ret;
+
+ /* Check that the config matches across the pins in the group */
+ if (i && prev_config != *config)
+ return -EOPNOTSUPP;
+
+ prev_config = *config;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops rzg2l_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzg2l_dt_node_to_map,
+ .dt_free_map = rzg2l_dt_free_map,
+};
+
+static const struct pinmux_ops rzg2l_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzg2l_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct pinconf_ops rzg2l_pinctrl_confops = {
+ .is_generic = true,
+ .pin_config_get = rzg2l_pinctrl_pinconf_get,
+ .pin_config_set = rzg2l_pinctrl_pinconf_set,
+ .pin_config_group_set = rzg2l_pinctrl_pinconf_group_set,
+ .pin_config_group_get = rzg2l_pinctrl_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+ unsigned long flags;
+ u8 reg8;
+ int ret;
+
+ ret = pinctrl_gpio_request(chip->base + offset);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Select GPIO mode in PMC Register */
+ reg8 = readb(pctrl->base + PMC(port));
+ reg8 &= ~BIT(bit);
+ writeb(reg8, pctrl->base + PMC(port));
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ unsigned long flags;
+ u16 reg16;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg16 = readw(pctrl->base + PM(port));
+ reg16 &= ~(PM_MASK << (bit * 2));
+
+ reg16 |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2);
+ writew(reg16, pctrl->base + PM(port));
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+
+ if (!(readb(pctrl->base + PMC(port)) & BIT(bit))) {
+ u16 reg16;
+
+ reg16 = readw(pctrl->base + PM(port));
+ reg16 = (reg16 >> (bit * 2)) & PM_MASK;
+ if (reg16 == PM_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
+ }
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rzg2l_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+
+ rzg2l_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+ unsigned long flags;
+ u8 reg8;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg8 = readb(pctrl->base + P(port));
+
+ if (value)
+ writeb(reg8 | BIT(bit), pctrl->base + P(port));
+ else
+ writeb(reg8 & ~BIT(bit), pctrl->base + P(port));
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzg2l_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+
+ rzg2l_gpio_set(chip, offset, value);
+ rzg2l_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+ u16 reg16;
+
+ reg16 = readw(pctrl->base + PM(port));
+ reg16 = (reg16 >> (bit * 2)) & PM_MASK;
+
+ if (reg16 == PM_INPUT)
+ return !!(readb(pctrl->base + PIN(port)) & BIT(bit));
+ else if (reg16 == PM_OUTPUT)
+ return !!(readb(pctrl->base + P(port)) & BIT(bit));
+ else
+ return -EINVAL;
+}
+
+static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip->base + offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzg2l_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzg2l_gpio_names[] = {
+ "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7",
+ "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7",
+ "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7",
+ "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7",
+ "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7",
+ "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7",
+ "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7",
+ "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7",
+ "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7",
+ "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P22_0", "P22_1", "P22_2", "P22_3", "P22_4", "P22_5", "P22_6", "P22_7",
+ "P23_0", "P23_1", "P23_2", "P23_3", "P23_4", "P23_5", "P23_6", "P23_7",
+ "P24_0", "P24_1", "P24_2", "P24_3", "P24_4", "P24_5", "P24_6", "P24_7",
+ "P25_0", "P25_1", "P25_2", "P25_3", "P25_4", "P25_5", "P25_6", "P25_7",
+ "P26_0", "P26_1", "P26_2", "P26_3", "P26_4", "P26_5", "P26_6", "P26_7",
+ "P27_0", "P27_1", "P27_2", "P27_3", "P27_4", "P27_5", "P27_6", "P27_7",
+ "P28_0", "P28_1", "P28_2", "P28_3", "P28_4", "P28_5", "P28_6", "P28_7",
+ "P29_0", "P29_1", "P29_2", "P29_3", "P29_4", "P29_5", "P29_6", "P29_7",
+ "P30_0", "P30_1", "P30_2", "P30_3", "P30_4", "P30_5", "P30_6", "P30_7",
+ "P31_0", "P31_1", "P31_2", "P31_3", "P31_4", "P31_5", "P31_6", "P31_7",
+ "P32_0", "P32_1", "P32_2", "P32_3", "P32_4", "P32_5", "P32_6", "P32_7",
+ "P33_0", "P33_1", "P33_2", "P33_3", "P33_4", "P33_5", "P33_6", "P33_7",
+ "P34_0", "P34_1", "P34_2", "P34_3", "P34_4", "P34_5", "P34_6", "P34_7",
+ "P35_0", "P35_1", "P35_2", "P35_3", "P35_4", "P35_5", "P35_6", "P35_7",
+ "P36_0", "P36_1", "P36_2", "P36_3", "P36_4", "P36_5", "P36_6", "P36_7",
+ "P37_0", "P37_1", "P37_2", "P37_3", "P37_4", "P37_5", "P37_6", "P37_7",
+ "P38_0", "P38_1", "P38_2", "P38_3", "P38_4", "P38_5", "P38_6", "P38_7",
+ "P39_0", "P39_1", "P39_2", "P39_3", "P39_4", "P39_5", "P39_6", "P39_7",
+ "P40_0", "P40_1", "P40_2", "P40_3", "P40_4", "P40_5", "P40_6", "P40_7",
+ "P41_0", "P41_1", "P41_2", "P41_3", "P41_4", "P41_5", "P41_6", "P41_7",
+ "P42_0", "P42_1", "P42_2", "P42_3", "P42_4", "P42_5", "P42_6", "P42_7",
+ "P43_0", "P43_1", "P43_2", "P43_3", "P43_4", "P43_5", "P43_6", "P43_7",
+ "P44_0", "P44_1", "P44_2", "P44_3", "P44_4", "P44_5", "P44_6", "P44_7",
+ "P45_0", "P45_1", "P45_2", "P45_3", "P45_4", "P45_5", "P45_6", "P45_7",
+ "P46_0", "P46_1", "P46_2", "P46_3", "P46_4", "P46_5", "P46_6", "P46_7",
+ "P47_0", "P47_1", "P47_2", "P47_3", "P47_4", "P47_5", "P47_6", "P47_7",
+ "P48_0", "P48_1", "P48_2", "P48_3", "P48_4", "P48_5", "P48_6", "P48_7",
+};
+
+static const u32 rzg2l_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(2, 0x10, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x11, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x12, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x13, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x14, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x15, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x16, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x17, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x18, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x19, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1a, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1b, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1c, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x1d, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1e, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1f, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x20, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x21, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x23, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x28, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x29, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(3, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x36, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x37, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x38, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x39, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x3a, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x3b, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x3c, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x3d, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x3e, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x3f, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x40, RZG2L_MPXED_PIN_FUNCS),
+};
+
+static struct rzg2l_dedicated_configs rzg2l_dedicated_pins[] = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
+ (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
+ { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
+ (PIN_CFG_SR | PIN_CFG_IOLH | PIN_CFG_IEN)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN)) },
+ { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) },
+ { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) },
+ { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD1))},
+ { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ { "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1,
+ (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH | PIN_CFG_SR)) },
+ { "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) },
+ { "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) },
+ { "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) },
+ { "RIIC1_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 3, PIN_CFG_IEN) },
+};
+
+static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
+{
+ struct device_node *np = pctrl->dev->of_node;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ const char *name = dev_name(pctrl->dev);
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
+ if (ret) {
+ dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
+ return ret;
+ }
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != ARRAY_SIZE(rzg2l_gpio_names)) {
+ dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
+ return -EINVAL;
+ }
+
+ chip->names = rzg2l_gpio_names;
+ chip->request = rzg2l_gpio_request;
+ chip->free = rzg2l_gpio_free;
+ chip->get_direction = rzg2l_gpio_get_direction;
+ chip->direction_input = rzg2l_gpio_direction_input;
+ chip->direction_output = rzg2l_gpio_direction_output;
+ chip->get = rzg2l_gpio_get;
+ chip->set = rzg2l_gpio_set;
+ chip->label = name;
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = -1;
+ chip->ngpio = of_args.args[2];
+
+ pctrl->gpio_range.id = 0;
+ pctrl->gpio_range.pin_base = 0;
+ pctrl->gpio_range.base = 0;
+ pctrl->gpio_range.npins = chip->ngpio;
+ pctrl->gpio_range.name = chip->label;
+ pctrl->gpio_range.gc = chip;
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO controller\n");
+ return ret;
+ }
+
+ dev_dbg(pctrl->dev, "Registered gpio controller\n");
+
+ return 0;
+}
+
+static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ u32 *pin_data;
+ int ret;
+
+ pctrl->desc.name = DRV_NAME;
+ pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins;
+ pctrl->desc.pctlops = &rzg2l_pinctrl_pctlops;
+ pctrl->desc.pmxops = &rzg2l_pinctrl_pmxops;
+ pctrl->desc.confops = &rzg2l_pinctrl_confops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins,
+ sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ pctrl->desc.pins = pins;
+
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = pctrl->data->port_pins[i];
+ if (i && !(i % RZG2L_PINS_PER_PORT))
+ j++;
+ pin_data[i] = pctrl->data->port_pin_configs[j];
+ pins[i].drv_data = &pin_data[i];
+ }
+
+ for (i = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ unsigned int index = pctrl->data->n_port_pins + i;
+
+ pins[index].number = index;
+ pins[index].name = pctrl->data->dedicated_pins[i].name;
+ pin_data[index] = pctrl->data->dedicated_pins[i].config;
+ pins[index].drv_data = &pin_data[index];
+ }
+
+ ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
+ &pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ ret = rzg2l_gpio_register(pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
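+
+/*
+ * To make the port indexing above concrete (illustrative): with
+ * RZG2L_PINS_PER_PORT == 8 (from the dt-bindings header), port pin 18
+ * is P2_2, so the loop assigns pin_data[18] = port_pin_configs[2];
+ * every pin in a port shares that port's packed capability word.
+ */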
+
+static void rzg2l_pinctrl_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzg2l_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzg2l_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+
+ pctrl->data = of_device_get_match_data(&pdev->dev);
+ if (!pctrl->data)
+ return -EINVAL;
+
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->clk = devm_clk_get(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ ret = PTR_ERR(pctrl->clk);
+ dev_err(pctrl->dev, "failed to get GPIO clk : %i\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&pctrl->lock);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, rzg2l_pinctrl_clk_disable,
+ pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev,
+ "failed to register GPIO clk disable action, %i\n",
+ ret);
+ return ret;
+ }
+
+ ret = rzg2l_pinctrl_register(pctrl);
+ if (ret)
+ return ret;
+
+ dev_info(pctrl->dev, "%s support registered\n", DRV_NAME);
+ return 0;
+}
+
+static struct rzg2l_pinctrl_data r9a07g044_data = {
+ .port_pins = rzg2l_gpio_names,
+ .port_pin_configs = rzg2l_gpio_configs,
+ .dedicated_pins = rzg2l_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(rzg2l_gpio_names),
+ .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins),
+};
+
+static const struct of_device_id rzg2l_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a07g044-pinctrl",
+ .data = &r9a07g044_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzg2l_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzg2l_pinctrl_of_table),
+ },
+ .probe = rzg2l_pinctrl_probe,
+};
+
+static int __init rzg2l_pinctrl_init(void)
+{
+ return platform_driver_register(&rzg2l_pinctrl_driver);
+}
+core_initcall(rzg2l_pinctrl_init);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/G2L family");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index bb488af29862..f3eecb20c086 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -841,7 +841,7 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
return pinctrl_enable(pmx->pctl);
}
-static const struct pinmux_bias_reg *
+const struct pinmux_bias_reg *
rcar_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
unsigned int *bit)
{
@@ -898,17 +898,17 @@ void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
if (reg->puen) {
enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
+ if (bias != PIN_CONFIG_BIAS_DISABLE) {
enable |= BIT(bit);
- if (reg->pud) {
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
+ if (reg->pud) {
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
- sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->pud, updown);
+ }
}
-
sh_pfc_write(pfc, reg->puen, enable);
} else {
enable = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 320898861c4b..2479b4fb9cf9 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -332,8 +332,8 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a77950_pinmux_info __weak;
-extern const struct sh_pfc_soc_info r8a77951_pinmux_info __weak;
+extern const struct sh_pfc_soc_info r8a77950_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77951_pinmux_info;
extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
@@ -781,6 +781,9 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Bias helpers
*/
+const struct pinmux_bias_reg *
+rcar_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit);
unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index b6e56422a700..fe5f6046fbd5 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -40,6 +40,24 @@ static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
};
+/*
+ * Bank type for non-alive banks. Bit fields:
+ * CON: 4, DAT: 1, PUD: 4, DRV: 4, CONPDN: 2, PUDPDN: 4
+ */
+static const struct samsung_pin_bank_type exynos850_bank_type_off = {
+ .fld_width = { 4, 1, 4, 4, 2, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+/*
+ * Bank type for alive banks. Bit fields:
+ * CON: 4, DAT: 1, PUD: 4, DRV: 4
+ */
+static const struct samsung_pin_bank_type exynos850_bank_type_alive = {
+ .fld_width = { 4, 1, 4, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
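+
+/*
+ * With these bank types, each per-function register of a bank lives at
+ * pctl_offset + reg_offset[]; e.g. (illustrative) the pull-up/down
+ * (PUD) register of "gpa0" at pctl_offset 0x000 sits at 0x000 + 0x08,
+ * and its 4-bit-wide fields place pin n in bits [4n+3:4n].
+ */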
+
/* Pad retention control code for accessing PMU regmap */
static atomic_t exynos_shared_retention_refcnt;
@@ -422,3 +440,101 @@ const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
.ctrl = exynos7_pin_ctrl,
.num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
};
+
+/* pin banks of exynos850 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynos850_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTW(4, 0x080, "gpa4", 0x10),
+ EXYNOS850_PIN_BANK_EINTN(3, 0x0a0, "gpq0"),
+};
+
+/* pin banks of exynos850 pin-controller 1 (CMGP) */
+static const struct samsung_pin_bank_data exynos850_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTW(1, 0x000, "gpm0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x020, "gpm1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x040, "gpm2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x060, "gpm3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x080, "gpm4", 0x10),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x0a0, "gpm5", 0x14),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x0c0, "gpm6", 0x18),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x0e0, "gpm7", 0x1c),
+};
+
+/* pin banks of exynos850 pin-controller 2 (AUD) */
+static const struct samsung_pin_bank_data exynos850_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x020, "gpb1", 0x04),
+};
+
+/* pin banks of exynos850 pin-controller 3 (HSI) */
+static const struct samsung_pin_bank_data exynos850_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTG(6, 0x000, "gpf2", 0x00),
+};
+
+/* pin banks of exynos850 pin-controller 4 (CORE) */
+static const struct samsung_pin_bank_data exynos850_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpf0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpf1", 0x04),
+};
+
+/* pin banks of exynos850 pin-controller 5 (PERI) */
+static const struct samsung_pin_bank_data exynos850_pin_banks5[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
+ EXYNOS850_PIN_BANK_EINTG(2, 0x000, "gpg0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x020, "gpp0", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x040, "gpp1", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x060, "gpp2", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x080, "gpg1", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x0a0, "gpg2", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(1, 0x0c0, "gpg3", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x0e0, "gpc0", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x100, "gpc1", 0x20),
+};
+
+static const struct samsung_pin_ctrl exynos850_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 ALIVE data */
+ .pin_banks = exynos850_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ }, {
+ /* pin-controller instance 1 CMGP data */
+ .pin_banks = exynos850_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ }, {
+ /* pin-controller instance 2 AUD data */
+ .pin_banks = exynos850_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks2),
+ }, {
+ /* pin-controller instance 3 HSI data */
+ .pin_banks = exynos850_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 4 CORE data */
+ .pin_banks = exynos850_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 5 PERI data */
+ .pin_banks = exynos850_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos850_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynos850_of_data __initconst = {
+ .ctrl = exynos850_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynos850_pin_ctrl),
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index da1ec13697e7..bfad1ced8017 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -108,6 +108,35 @@
.pctl_res_idx = pctl_idx, \
} \
+#define EXYNOS850_PIN_BANK_EINTN(pins, reg, id) \
+ { \
+ .type = &exynos850_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_NONE, \
+ .name = id \
+ }
+
+#define EXYNOS850_PIN_BANK_EINTG(pins, reg, id, offs) \
+ { \
+ .type = &exynos850_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
+#define EXYNOS850_PIN_BANK_EINTW(pins, reg, id, offs) \
+ { \
+ .type = &exynos850_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 376876bd6605..2a0fc63516f1 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
pin_bank->grange.pin_base = drvdata->pin_base
+ pin_bank->pin_base;
pin_bank->grange.base = pin_bank->grange.pin_base;
- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
+ pin_bank->grange.npins = pin_bank->nr_pins;
pin_bank->grange.gc = &pin_bank->gpio_chip;
pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
}
@@ -1264,6 +1264,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &exynos5433_of_data },
{ .compatible = "samsung,exynos7-pinctrl",
.data = &exynos7_of_data },
+ { .compatible = "samsung,exynos850-pinctrl",
+ .data = &exynos850_of_data },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index de44f8ec330b..4c2149e9c544 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -339,6 +339,7 @@ extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
+extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index f36f29113370..d532f3c6f670 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -40,6 +40,12 @@ config PINCTRL_STM32H743
default MACH_STM32H743
select PINCTRL_STM32
+config PINCTRL_STM32MP135
+ bool "STMicroelectronics STM32MP135 pin control" if COMPILE_TEST && !MACH_STM32MP13
+ depends on OF && HAS_IOMEM
+ default MACH_STM32MP13
+ select PINCTRL_STM32
+
config PINCTRL_STM32MP157
bool "STMicroelectronics STM32MP157 pin control" if COMPILE_TEST && !MACH_STM32MP157
depends on OF && HAS_IOMEM
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
index f7c56d4b941c..619629ee9944 100644
--- a/drivers/pinctrl/stm32/Makefile
+++ b/drivers/pinctrl/stm32/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_PINCTRL_STM32F469) += pinctrl-stm32f469.o
obj-$(CONFIG_PINCTRL_STM32F746) += pinctrl-stm32f746.o
obj-$(CONFIG_PINCTRL_STM32F769) += pinctrl-stm32f769.o
obj-$(CONFIG_PINCTRL_STM32H743) += pinctrl-stm32h743.o
+obj-$(CONFIG_PINCTRL_STM32MP135) += pinctrl-stm32mp135.o
obj-$(CONFIG_PINCTRL_STM32MP157) += pinctrl-stm32mp157.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp135.c b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c
new file mode 100644
index 000000000000..4ab03520c407
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c
@@ -0,0 +1,1679 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2021 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-stm32.h"
+
+static const struct stm32_desc_pin stm32mp135_pins[] = {
+ STM32_PIN(
+ PINCTRL_PIN(0, "PA0"),
+ STM32_FUNCTION(0, "GPIOA0"),
+ STM32_FUNCTION(2, "TIM2_CH1"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(5, "TIM15_BKIN"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(9, "UART5_TX"),
+ STM32_FUNCTION(12, "ETH1_MII_CRS"),
+ STM32_FUNCTION(13, "ETH2_MII_CRS"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(1, "PA1"),
+ STM32_FUNCTION(0, "GPIOA1"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(4, "LPTIM3_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH1N"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN0"),
+ STM32_FUNCTION(8, "USART2_RTS USART2_DE"),
+ STM32_FUNCTION(12, "ETH1_MII_RX_CLK ETH1_RGMII_RX_CLK ETH1_RMII_REF_CLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(2, "PA2"),
+ STM32_FUNCTION(0, "GPIOA2"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(4, "LPTIM4_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH1"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(12, "ETH1_MDIO"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(3, "PA3"),
+ STM32_FUNCTION(0, "GPIOA3"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(4, "LPTIM5_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH2"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SDO"),
+ STM32_FUNCTION(7, "SAI1_FS_B"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(12, "ETH1_MII_COL"),
+ STM32_FUNCTION(13, "ETH2_MII_COL"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(4, "PA4"),
+ STM32_FUNCTION(0, "GPIOA4"),
+ STM32_FUNCTION(3, "TIM5_ETR"),
+ STM32_FUNCTION(4, "USART2_CK"),
+ STM32_FUNCTION(5, "SAI1_SCK_B"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(11, "ETH1_PPS_OUT"),
+ STM32_FUNCTION(12, "ETH2_PPS_OUT"),
+ STM32_FUNCTION(13, "SAI1_SCK_A"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(5, "PA5"),
+ STM32_FUNCTION(0, "GPIOA5"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(3, "USART2_CK"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(5, "SAI1_D1"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(11, "ETH1_PPS_OUT"),
+ STM32_FUNCTION(12, "ETH2_PPS_OUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(6, "PA6"),
+ STM32_FUNCTION(0, "GPIOA6"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(5, "SAI2_CK2"),
+ STM32_FUNCTION(6, "SPI1_MISO I2S1_SDI"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(9, "UART4_RTS UART4_DE"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(13, "SAI2_SCK_A"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(7, "PA7"),
+ STM32_FUNCTION(0, "GPIOA7"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(5, "SAI2_D1"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(8, "USART1_CTS USART1_NSS"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(12, "ETH1_MII_RX_DV ETH1_RGMII_RX_CTL ETH1_RMII_CRS_DV"),
+ STM32_FUNCTION(13, "SAI2_SD_A"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(8, "PA8"),
+ STM32_FUNCTION(0, "GPIOA8"),
+ STM32_FUNCTION(1, "MCO1"),
+ STM32_FUNCTION(3, "SAI2_MCLK_A"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(7, "SAI2_CK1"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(9, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(11, "OTG_HS_SOF"),
+ STM32_FUNCTION(12, "ETH2_MII_RXD3 ETH2_RGMII_RXD3"),
+ STM32_FUNCTION(13, "FMC_A21"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(9, "PA9"),
+ STM32_FUNCTION(0, "GPIOA9"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(11, "FMC_NWAIT"),
+ STM32_FUNCTION(14, "DCMIPP_D0"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(10, "PA10"),
+ STM32_FUNCTION(0, "GPIOA10"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(11, "PA11"),
+ STM32_FUNCTION(0, "GPIOA11"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(5, "I2C5_SCL"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(8, "USART1_CTS USART1_NSS"),
+ STM32_FUNCTION(11, "ETH2_MII_RXD1 ETH2_RGMII_RXD1 ETH2_RMII_RXD1"),
+ STM32_FUNCTION(12, "ETH1_CLK"),
+ STM32_FUNCTION(14, "ETH2_CLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(12, "PA12"),
+ STM32_FUNCTION(0, "GPIOA12"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(3, "SAI2_MCLK_A"),
+ STM32_FUNCTION(8, "USART1_RTS USART1_DE"),
+ STM32_FUNCTION(11, "TSC_G1_IO2"),
+ STM32_FUNCTION(12, "ETH2_MII_RX_DV ETH2_RGMII_RX_CTL ETH2_RMII_CRS_DV"),
+ STM32_FUNCTION(13, "FMC_A7"),
+ STM32_FUNCTION(14, "DCMIPP_D1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(13, "PA13"),
+ STM32_FUNCTION(0, "GPIOA13"),
+ STM32_FUNCTION(1, "DBTRGO"),
+ STM32_FUNCTION(2, "DBTRGI"),
+ STM32_FUNCTION(3, "MCO1"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(14, "PA14"),
+ STM32_FUNCTION(0, "GPIOA14"),
+ STM32_FUNCTION(1, "DBTRGO"),
+ STM32_FUNCTION(2, "DBTRGI"),
+ STM32_FUNCTION(3, "MCO2"),
+ STM32_FUNCTION(11, "OTG_HS_SOF"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(15, "PA15"),
+ STM32_FUNCTION(0, "GPIOA15"),
+ STM32_FUNCTION(1, "TRACED5"),
+ STM32_FUNCTION(2, "TIM2_CH1"),
+ STM32_FUNCTION(6, "I2S4_MCK"),
+ STM32_FUNCTION(8, "UART4_RTS UART4_DE"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "LCD_R0"),
+ STM32_FUNCTION(11, "TSC_G3_IO1"),
+ STM32_FUNCTION(12, "LCD_G7"),
+ STM32_FUNCTION(13, "FMC_A9"),
+ STM32_FUNCTION(14, "DCMIPP_D14"),
+ STM32_FUNCTION(15, "DCMIPP_D5"),
+ STM32_FUNCTION(16, "HDP5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(16, "PB0"),
+ STM32_FUNCTION(0, "GPIOB0"),
+ STM32_FUNCTION(1, "DBTRGI"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(5, "USART1_RX"),
+ STM32_FUNCTION(6, "I2S1_MCK"),
+ STM32_FUNCTION(7, "SAI2_FS_A"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(9, "UART4_CTS"),
+ STM32_FUNCTION(11, "SAI2_D2"),
+ STM32_FUNCTION(12, "ETH1_MII_RXD2 ETH1_RGMII_RXD2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(17, "PB1"),
+ STM32_FUNCTION(0, "GPIOB1"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN1"),
+ STM32_FUNCTION(8, "UART4_RX"),
+ STM32_FUNCTION(12, "ETH1_MII_RXD3 ETH1_RGMII_RXD3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(18, "PB2"),
+ STM32_FUNCTION(0, "GPIOB2"),
+ STM32_FUNCTION(2, "RTC_OUT2"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(12, "ETH2_MDIO"),
+ STM32_FUNCTION(13, "FMC_A6"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(19, "PB3"),
+ STM32_FUNCTION(0, "GPIOB3"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(5, "SAI2_CK1"),
+ STM32_FUNCTION(6, "SPI4_NSS I2S4_WS"),
+ STM32_FUNCTION(9, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(11, "SDMMC2_D2"),
+ STM32_FUNCTION(12, "LCD_R6"),
+ STM32_FUNCTION(13, "SAI2_MCLK_A"),
+ STM32_FUNCTION(14, "UART7_RX"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(20, "PB4"),
+ STM32_FUNCTION(0, "GPIOB4"),
+ STM32_FUNCTION(1, "TRACED14"),
+ STM32_FUNCTION(2, "TIM16_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(5, "SAI2_CK2"),
+ STM32_FUNCTION(6, "SPI4_SCK I2S4_CK"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(11, "SDMMC2_D3"),
+ STM32_FUNCTION(12, "LCD_G1"),
+ STM32_FUNCTION(13, "SAI2_SCK_A"),
+ STM32_FUNCTION(14, "LCD_B6"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(21, "PB5"),
+ STM32_FUNCTION(0, "GPIOB5"),
+ STM32_FUNCTION(1, "TRACED4"),
+ STM32_FUNCTION(2, "TIM17_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
+ STM32_FUNCTION(7, "I2C4_SMBA"),
+ STM32_FUNCTION(9, "SDMMC1_CKIN"),
+ STM32_FUNCTION(10, "FDCAN2_RX"),
+ STM32_FUNCTION(12, "UART5_RX"),
+ STM32_FUNCTION(14, "LCD_B6"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(22, "PB6"),
+ STM32_FUNCTION(0, "GPIOB6"),
+ STM32_FUNCTION(1, "TRACED6"),
+ STM32_FUNCTION(2, "TIM16_CH1N"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(5, "USART1_TX"),
+ STM32_FUNCTION(7, "SAI1_CK2"),
+ STM32_FUNCTION(8, "LCD_B6"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(11, "TSC_G1_IO4"),
+ STM32_FUNCTION(12, "ETH2_MDIO"),
+ STM32_FUNCTION(13, "FMC_NE3"),
+ STM32_FUNCTION(14, "DCMIPP_D5"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "HDP6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(23, "PB7"),
+ STM32_FUNCTION(0, "GPIOB7"),
+ STM32_FUNCTION(2, "TIM17_CH1N"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(4, "TSC_SYNC"),
+ STM32_FUNCTION(6, "I2S4_CK"),
+ STM32_FUNCTION(7, "I2C4_SDA"),
+ STM32_FUNCTION(11, "FMC_NCE2"),
+ STM32_FUNCTION(13, "FMC_NL"),
+ STM32_FUNCTION(14, "DCMIPP_D13"),
+ STM32_FUNCTION(15, "DCMIPP_PIXCLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(24, "PB8"),
+ STM32_FUNCTION(0, "GPIOB8"),
+ STM32_FUNCTION(2, "TIM16_CH1"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(6, "I2C3_SCL"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN1"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(11, "SAI1_D1"),
+ STM32_FUNCTION(13, "FMC_D13 FMC_AD13"),
+ STM32_FUNCTION(14, "DCMIPP_D6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(25, "PB9"),
+ STM32_FUNCTION(0, "GPIOB9"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(7, "I2C4_SDA"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
+ STM32_FUNCTION(11, "SDMMC2_D5"),
+ STM32_FUNCTION(12, "UART5_TX"),
+ STM32_FUNCTION(13, "SDMMC1_CDIR"),
+ STM32_FUNCTION(14, "LCD_DE"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(26, "PB10"),
+ STM32_FUNCTION(0, "GPIOB10"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(4, "LPTIM2_IN1"),
+ STM32_FUNCTION(5, "I2C5_SMBA"),
+ STM32_FUNCTION(6, "SPI4_NSS I2S4_WS"),
+ STM32_FUNCTION(7, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(27, "PB11"),
+ STM32_FUNCTION(0, "GPIOB11"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(4, "LPTIM1_OUT"),
+ STM32_FUNCTION(5, "I2C5_SMBA"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(12, "ETH1_MII_TX_EN ETH1_RGMII_TX_CTL ETH1_RMII_TX_EN"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(28, "PB12"),
+ STM32_FUNCTION(0, "GPIOB12"),
+ STM32_FUNCTION(1, "TRACED10"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN1"),
+ STM32_FUNCTION(8, "UART7_RTS UART7_DE"),
+ STM32_FUNCTION(9, "USART3_RX"),
+ STM32_FUNCTION(12, "UART5_RX"),
+ STM32_FUNCTION(13, "SDMMC1_D5"),
+ STM32_FUNCTION(14, "LCD_R3"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(29, "PB13"),
+ STM32_FUNCTION(0, "GPIOB13"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(5, "LPTIM2_OUT"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(7, "I2C4_SCL"),
+ STM32_FUNCTION(9, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(10, "FDCAN2_TX"),
+ STM32_FUNCTION(12, "UART5_TX"),
+ STM32_FUNCTION(14, "LCD_CLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(30, "PB14"),
+ STM32_FUNCTION(0, "GPIOB14"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(3, "TIM12_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(5, "USART1_TX"),
+ STM32_FUNCTION(11, "SDMMC2_D0"),
+ STM32_FUNCTION(12, "SDMMC1_D4"),
+ STM32_FUNCTION(14, "LCD_R0"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(31, "PB15"),
+ STM32_FUNCTION(0, "GPIOB15"),
+ STM32_FUNCTION(1, "RTC_REFIN"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(3, "TIM12_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(5, "SAI2_D2"),
+ STM32_FUNCTION(6, "SPI4_MOSI I2S4_SDO"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN2"),
+ STM32_FUNCTION(8, "UART7_CTS"),
+ STM32_FUNCTION(9, "SDMMC1_CKIN"),
+ STM32_FUNCTION(11, "SDMMC2_D1"),
+ STM32_FUNCTION(13, "SAI2_FS_A"),
+ STM32_FUNCTION(14, "LCD_CLK"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(32, "PC0"),
+ STM32_FUNCTION(0, "GPIOC0"),
+ STM32_FUNCTION(3, "SAI1_SCK_A"),
+ STM32_FUNCTION(5, "SAI1_CK2"),
+ STM32_FUNCTION(6, "I2S1_MCK"),
+ STM32_FUNCTION(7, "SPI1_MOSI I2S1_SDO"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(33, "PC1"),
+ STM32_FUNCTION(0, "GPIOC1"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(7, "SAI1_D3"),
+ STM32_FUNCTION(11, "ETH1_MII_RX_DV ETH1_RMII_CRS_DV"),
+ STM32_FUNCTION(12, "ETH1_RGMII_GTX_CLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(34, "PC2"),
+ STM32_FUNCTION(0, "GPIOC2"),
+ STM32_FUNCTION(2, "SPI5_NSS"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SAI2_MCLK_A"),
+ STM32_FUNCTION(8, "USART1_RTS USART1_DE"),
+ STM32_FUNCTION(11, "SAI2_CK1"),
+ STM32_FUNCTION(12, "ETH1_MII_TXD2 ETH1_RGMII_TXD2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(35, "PC3"),
+ STM32_FUNCTION(0, "GPIOC3"),
+ STM32_FUNCTION(3, "SAI1_CK1"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(6, "SPI1_MISO I2S1_SDI"),
+ STM32_FUNCTION(7, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(9, "UART5_CTS"),
+ STM32_FUNCTION(11, "SAI1_MCLK_A"),
+ STM32_FUNCTION(12, "ETH1_MII_TX_CLK"),
+ STM32_FUNCTION(13, "ETH2_MII_TX_CLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(36, "PC4"),
+ STM32_FUNCTION(0, "GPIOC4"),
+ STM32_FUNCTION(3, "TIM3_ETR"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN2"),
+ STM32_FUNCTION(5, "SAI1_D3"),
+ STM32_FUNCTION(6, "I2S1_MCK"),
+ STM32_FUNCTION(9, "UART5_RTS UART5_DE"),
+ STM32_FUNCTION(10, "SPDIFRX_IN2"),
+ STM32_FUNCTION(12, "ETH1_MII_RXD0 ETH1_RGMII_RXD0 ETH1_RMII_RXD0"),
+ STM32_FUNCTION(13, "SAI2_D3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(37, "PC5"),
+ STM32_FUNCTION(0, "GPIOC5"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN2"),
+ STM32_FUNCTION(5, "SAI2_D4"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(7, "SAI1_D4"),
+ STM32_FUNCTION(8, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(10, "SPDIFRX_IN3"),
+ STM32_FUNCTION(12, "ETH1_MII_RXD1 ETH1_RGMII_RXD1 ETH1_RMII_RXD1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(38, "PC6"),
+ STM32_FUNCTION(0, "GPIOC6"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(5, "DFSDM1_DATIN0"),
+ STM32_FUNCTION(6, "I2S3_MCK"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(9, "SDMMC1_D6"),
+ STM32_FUNCTION(10, "SDMMC2_D0DIR"),
+ STM32_FUNCTION(11, "SDMMC2_D6"),
+ STM32_FUNCTION(12, "LCD_B1"),
+ STM32_FUNCTION(13, "FMC_A19"),
+ STM32_FUNCTION(14, "LCD_R6"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "HDP2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(39, "PC7"),
+ STM32_FUNCTION(0, "GPIOC7"),
+ STM32_FUNCTION(1, "TRACED4"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(7, "I2S2_MCK"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(9, "USART3_CTS"),
+ STM32_FUNCTION(10, "SDMMC2_CDIR"),
+ STM32_FUNCTION(11, "SDMMC2_D7"),
+ STM32_FUNCTION(12, "LCD_R1"),
+ STM32_FUNCTION(13, "SDMMC1_D7"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "HDP4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(40, "PC8"),
+ STM32_FUNCTION(0, "GPIOC8"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(6, "SPI3_MISO I2S3_SDI"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(9, "USART3_CTS"),
+ STM32_FUNCTION(11, "SAI2_FS_B"),
+ STM32_FUNCTION(12, "UART5_RTS UART5_DE"),
+ STM32_FUNCTION(13, "SDMMC1_D0"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(41, "PC9"),
+ STM32_FUNCTION(0, "GPIOC9"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(9, "UART5_CTS"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
+ STM32_FUNCTION(13, "SDMMC1_D1"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(42, "PC10"),
+ STM32_FUNCTION(0, "GPIOC10"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(6, "I2C1_SCL"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(13, "SDMMC1_D2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(43, "PC11"),
+ STM32_FUNCTION(0, "GPIOC11"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(9, "UART5_RX"),
+ STM32_FUNCTION(11, "SAI2_SCK_B"),
+ STM32_FUNCTION(13, "SDMMC1_D3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(44, "PC12"),
+ STM32_FUNCTION(0, "GPIOC12"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "SDMMC1_CK"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(45, "PC13"),
+ STM32_FUNCTION(0, "GPIOC13"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(46, "PC14"),
+ STM32_FUNCTION(0, "GPIOC14"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(47, "PC15"),
+ STM32_FUNCTION(0, "GPIOC15"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(48, "PD0"),
+ STM32_FUNCTION(0, "GPIOD0"),
+ STM32_FUNCTION(3, "SAI1_MCLK_A"),
+ STM32_FUNCTION(7, "SAI1_CK1"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D2 FMC_AD2"),
+ STM32_FUNCTION(14, "DCMIPP_D1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(49, "PD1"),
+ STM32_FUNCTION(0, "GPIOD1"),
+ STM32_FUNCTION(5, "I2C5_SCL"),
+ STM32_FUNCTION(6, "SPI4_MOSI I2S4_SDO"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(12, "LCD_B6"),
+ STM32_FUNCTION(13, "FMC_D3 FMC_AD3"),
+ STM32_FUNCTION(14, "DCMIPP_D13"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(50, "PD2"),
+ STM32_FUNCTION(0, "GPIOD2"),
+ STM32_FUNCTION(1, "TRACED4"),
+ STM32_FUNCTION(3, "TIM3_ETR"),
+ STM32_FUNCTION(5, "I2C1_SMBA"),
+ STM32_FUNCTION(6, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(7, "SAI2_D1"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(13, "SDMMC1_CMD"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(51, "PD3"),
+ STM32_FUNCTION(0, "GPIOD3"),
+ STM32_FUNCTION(3, "TIM2_CH1"),
+ STM32_FUNCTION(4, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(5, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(6, "I2C1_SDA"),
+ STM32_FUNCTION(7, "SAI1_D3"),
+ STM32_FUNCTION(13, "FMC_CLK"),
+ STM32_FUNCTION(14, "DCMIPP_D5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(52, "PD4"),
+ STM32_FUNCTION(0, "GPIOD4"),
+ STM32_FUNCTION(4, "USART2_RTS USART2_DE"),
+ STM32_FUNCTION(6, "SPI3_MISO I2S3_SDI"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN0"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
+ STM32_FUNCTION(12, "LCD_R1"),
+ STM32_FUNCTION(13, "FMC_NOE"),
+ STM32_FUNCTION(14, "LCD_R4"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(53, "PD5"),
+ STM32_FUNCTION(0, "GPIOD5"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(13, "FMC_NWE"),
+ STM32_FUNCTION(14, "LCD_B0"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(54, "PD6"),
+ STM32_FUNCTION(0, "GPIOD6"),
+ STM32_FUNCTION(2, "TIM16_CH1N"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(12, "TSC_G2_IO1"),
+ STM32_FUNCTION(14, "DCMIPP_D4"),
+ STM32_FUNCTION(15, "DCMIPP_D0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(55, "PD7"),
+ STM32_FUNCTION(0, "GPIOD7"),
+ STM32_FUNCTION(1, "MCO1"),
+ STM32_FUNCTION(4, "USART2_CK"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(6, "I2C3_SDA"),
+ STM32_FUNCTION(10, "SPDIFRX_IN0"),
+ STM32_FUNCTION(11, "ETH1_MII_RX_CLK ETH1_RGMII_RX_CLK ETH1_RMII_REF_CLK"),
+ STM32_FUNCTION(12, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(13, "FMC_NE1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(56, "PD8"),
+ STM32_FUNCTION(0, "GPIOD8"),
+ STM32_FUNCTION(4, "USART2_TX"),
+ STM32_FUNCTION(6, "I2S4_WS"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(11, "TSC_G1_IO3"),
+ STM32_FUNCTION(14, "DCMIPP_D9"),
+ STM32_FUNCTION(15, "DCMIPP_D3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(57, "PD9"),
+ STM32_FUNCTION(0, "GPIOD9"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN3"),
+ STM32_FUNCTION(11, "SDMMC2_CDIR"),
+ STM32_FUNCTION(12, "LCD_B5"),
+ STM32_FUNCTION(13, "FMC_D14 FMC_AD14"),
+ STM32_FUNCTION(14, "LCD_CLK"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(58, "PD10"),
+ STM32_FUNCTION(0, "GPIOD10"),
+ STM32_FUNCTION(1, "RTC_REFIN"),
+ STM32_FUNCTION(5, "I2C5_SMBA"),
+ STM32_FUNCTION(6, "SPI4_NSS I2S4_WS"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(10, "LCD_G5"),
+ STM32_FUNCTION(11, "TSC_G2_IO2"),
+ STM32_FUNCTION(12, "LCD_B7"),
+ STM32_FUNCTION(13, "FMC_D15 FMC_AD15"),
+ STM32_FUNCTION(14, "DCMIPP_VSYNC"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(59, "PD11"),
+ STM32_FUNCTION(0, "GPIOD11"),
+ STM32_FUNCTION(4, "LPTIM2_IN2"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(8, "USART3_CTS USART3_NSS"),
+ STM32_FUNCTION(9, "SPDIFRX_IN0"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(11, "ETH2_RGMII_CLK125"),
+ STM32_FUNCTION(12, "LCD_R7"),
+ STM32_FUNCTION(13, "FMC_CLE FMC_A16"),
+ STM32_FUNCTION(14, "UART7_RX"),
+ STM32_FUNCTION(15, "DCMIPP_D4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(60, "PD12"),
+ STM32_FUNCTION(0, "GPIOD12"),
+ STM32_FUNCTION(2, "LPTIM1_IN1"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(6, "I2C1_SCL"),
+ STM32_FUNCTION(8, "USART3_RTS USART3_DE"),
+ STM32_FUNCTION(13, "FMC_ALE FMC_A17"),
+ STM32_FUNCTION(14, "DCMIPP_D6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(61, "PD13"),
+ STM32_FUNCTION(0, "GPIOD13"),
+ STM32_FUNCTION(2, "LPTIM2_ETR"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(5, "SAI1_CK1"),
+ STM32_FUNCTION(7, "SAI1_MCLK_A"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(11, "TSC_G2_IO4"),
+ STM32_FUNCTION(12, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(13, "FMC_A18"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(62, "PD14"),
+ STM32_FUNCTION(0, "GPIOD14"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(13, "FMC_D0 FMC_AD0"),
+ STM32_FUNCTION(14, "DCMIPP_D8"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(63, "PD15"),
+ STM32_FUNCTION(0, "GPIOD15"),
+ STM32_FUNCTION(2, "USART2_RX"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN2"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(13, "FMC_D1 FMC_AD1"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(64, "PE0"),
+ STM32_FUNCTION(0, "GPIOE0"),
+ STM32_FUNCTION(7, "DCMIPP_D12"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(10, "FDCAN2_RX"),
+ STM32_FUNCTION(11, "TSC_G4_IO1"),
+ STM32_FUNCTION(12, "LCD_B1"),
+ STM32_FUNCTION(13, "FMC_A11"),
+ STM32_FUNCTION(14, "DCMIPP_D1"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(65, "PE1"),
+ STM32_FUNCTION(0, "GPIOE1"),
+ STM32_FUNCTION(2, "LPTIM1_IN2"),
+ STM32_FUNCTION(4, "TSC_G2_IO3"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(10, "LCD_HSYNC"),
+ STM32_FUNCTION(12, "LCD_R4"),
+ STM32_FUNCTION(13, "FMC_NBL1"),
+ STM32_FUNCTION(14, "DCMIPP_D3"),
+ STM32_FUNCTION(15, "DCMIPP_D12"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(66, "PE2"),
+ STM32_FUNCTION(0, "GPIOE2"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(2, "TIM2_ETR"),
+ STM32_FUNCTION(4, "TSC_G5_IO1"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(7, "SAI1_FS_B"),
+ STM32_FUNCTION(8, "USART6_RTS USART6_DE"),
+ STM32_FUNCTION(10, "SPDIFRX_IN1"),
+ STM32_FUNCTION(11, "ETH2_MII_RXD1 ETH2_RGMII_RXD1 ETH2_RMII_RXD1"),
+ STM32_FUNCTION(13, "FMC_A23"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(67, "PE3"),
+ STM32_FUNCTION(0, "GPIOE3"),
+ STM32_FUNCTION(1, "TRACED11"),
+ STM32_FUNCTION(3, "SAI2_D4"),
+ STM32_FUNCTION(5, "TIM15_BKIN"),
+ STM32_FUNCTION(6, "SPI4_MISO I2S4_SDI"),
+ STM32_FUNCTION(9, "USART3_RTS USART3_DE"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
+ STM32_FUNCTION(11, "SDMMC2_CK"),
+ STM32_FUNCTION(14, "LCD_R4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(68, "PE4"),
+ STM32_FUNCTION(0, "GPIOE4"),
+ STM32_FUNCTION(2, "SPI5_MISO"),
+ STM32_FUNCTION(3, "SAI1_D2"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN3"),
+ STM32_FUNCTION(5, "TIM15_CH1N"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(8, "UART7_RTS UART7_DE"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_NCS"),
+ STM32_FUNCTION(11, "FMC_NCE2"),
+ STM32_FUNCTION(12, "TSC_G1_IO1"),
+ STM32_FUNCTION(13, "FMC_A25"),
+ STM32_FUNCTION(14, "DCMIPP_D3"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(69, "PE5"),
+ STM32_FUNCTION(0, "GPIOE5"),
+ STM32_FUNCTION(3, "SAI2_SCK_B"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(5, "TIM15_CH1"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(11, "ETH1_MII_TXD3 ETH1_RGMII_TXD3"),
+ STM32_FUNCTION(13, "FMC_NE1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(70, "PE6"),
+ STM32_FUNCTION(0, "GPIOE6"),
+ STM32_FUNCTION(1, "MCO2"),
+ STM32_FUNCTION(2, "TIM1_BKIN2"),
+ STM32_FUNCTION(3, "SAI2_SCK_B"),
+ STM32_FUNCTION(5, "TIM15_CH2"),
+ STM32_FUNCTION(6, "I2C3_SMBA"),
+ STM32_FUNCTION(7, "SAI1_SCK_B"),
+ STM32_FUNCTION(9, "UART4_RTS UART4_DE"),
+ STM32_FUNCTION(12, "ETH2_MII_TXD3 ETH2_RGMII_TXD3"),
+ STM32_FUNCTION(13, "FMC_A22"),
+ STM32_FUNCTION(14, "DCMIPP_D7"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(71, "PE7"),
+ STM32_FUNCTION(0, "GPIOE7"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(5, "LPTIM2_IN1"),
+ STM32_FUNCTION(9, "UART5_TX"),
+ STM32_FUNCTION(13, "FMC_D4 FMC_AD4"),
+ STM32_FUNCTION(14, "LCD_B3"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(72, "PE8"),
+ STM32_FUNCTION(0, "GPIOE8"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN2"),
+ STM32_FUNCTION(6, "I2C1_SDA"),
+ STM32_FUNCTION(8, "UART7_TX"),
+ STM32_FUNCTION(13, "FMC_D5 FMC_AD5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(73, "PE9"),
+ STM32_FUNCTION(0, "GPIOE9"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(12, "LCD_HSYNC"),
+ STM32_FUNCTION(13, "FMC_D6 FMC_AD6"),
+ STM32_FUNCTION(14, "DCMIPP_D7"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "HDP3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(74, "PE10"),
+ STM32_FUNCTION(0, "GPIOE10"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(8, "UART7_RX"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D7 FMC_AD7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(75, "PE11"),
+ STM32_FUNCTION(0, "GPIOE11"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(3, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(5, "SAI1_D2"),
+ STM32_FUNCTION(6, "SPI4_MOSI I2S4_SDO"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(10, "LCD_R0"),
+ STM32_FUNCTION(11, "ETH2_MII_TX_ER"),
+ STM32_FUNCTION(12, "ETH1_MII_TX_ER"),
+ STM32_FUNCTION(13, "FMC_D8 FMC_AD8"),
+ STM32_FUNCTION(14, "DCMIPP_D10"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(76, "PE12"),
+ STM32_FUNCTION(0, "GPIOE12"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(6, "SPI4_SCK I2S4_CK"),
+ STM32_FUNCTION(9, "UART8_RTS UART8_DE"),
+ STM32_FUNCTION(10, "LCD_VSYNC"),
+ STM32_FUNCTION(11, "TSC_G3_IO2"),
+ STM32_FUNCTION(12, "LCD_G4"),
+ STM32_FUNCTION(13, "FMC_D9 FMC_AD9"),
+ STM32_FUNCTION(14, "DCMIPP_D11"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "HDP4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(77, "PE13"),
+ STM32_FUNCTION(0, "GPIOE13"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(5, "I2C5_SDA"),
+ STM32_FUNCTION(6, "SPI4_MISO I2S4_SDI"),
+ STM32_FUNCTION(12, "LCD_B1"),
+ STM32_FUNCTION(13, "FMC_D10 FMC_AD10"),
+ STM32_FUNCTION(14, "DCMIPP_D4"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(78, "PE14"),
+ STM32_FUNCTION(0, "GPIOE14"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(5, "SAI1_D4"),
+ STM32_FUNCTION(9, "UART8_RTS UART8_DE"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(13, "FMC_D11 FMC_AD11"),
+ STM32_FUNCTION(14, "DCMIPP_D7"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(79, "PE15"),
+ STM32_FUNCTION(0, "GPIOE15"),
+ STM32_FUNCTION(2, "TIM2_ETR"),
+ STM32_FUNCTION(3, "TIM1_BKIN"),
+ STM32_FUNCTION(4, "USART2_CTS USART2_NSS"),
+ STM32_FUNCTION(7, "I2C4_SCL"),
+ STM32_FUNCTION(13, "FMC_D12 FMC_AD12"),
+ STM32_FUNCTION(14, "DCMIPP_D10"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "HDP7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(80, "PF0"),
+ STM32_FUNCTION(0, "GPIOF0"),
+ STM32_FUNCTION(1, "TRACED13"),
+ STM32_FUNCTION(4, "DFSDM1_CKOUT"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(11, "SDMMC2_D4"),
+ STM32_FUNCTION(13, "FMC_A0"),
+ STM32_FUNCTION(14, "LCD_R6"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(81, "PF1"),
+ STM32_FUNCTION(0, "GPIOF1"),
+ STM32_FUNCTION(1, "TRACED7"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(6, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(13, "FMC_A1"),
+ STM32_FUNCTION(14, "LCD_B7"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "HDP7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(82, "PF2"),
+ STM32_FUNCTION(0, "GPIOF2"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(7, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(10, "SDMMC2_D0DIR"),
+ STM32_FUNCTION(12, "SDMMC1_D0DIR"),
+ STM32_FUNCTION(13, "FMC_A2"),
+ STM32_FUNCTION(14, "LCD_G4"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(83, "PF3"),
+ STM32_FUNCTION(0, "GPIOF3"),
+ STM32_FUNCTION(4, "LPTIM2_IN2"),
+ STM32_FUNCTION(5, "I2C5_SDA"),
+ STM32_FUNCTION(6, "SPI4_MISO I2S4_SDI"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(13, "FMC_A3"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(84, "PF4"),
+ STM32_FUNCTION(0, "GPIOF4"),
+ STM32_FUNCTION(4, "USART2_RX"),
+ STM32_FUNCTION(11, "TSC_G3_IO3"),
+ STM32_FUNCTION(12, "ETH2_MII_RXD0 ETH2_RGMII_RXD0 ETH2_RMII_RXD0"),
+ STM32_FUNCTION(13, "FMC_A4"),
+ STM32_FUNCTION(14, "DCMIPP_D4"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(85, "PF5"),
+ STM32_FUNCTION(0, "GPIOF5"),
+ STM32_FUNCTION(1, "TRACED12"),
+ STM32_FUNCTION(5, "DFSDM1_CKIN0"),
+ STM32_FUNCTION(6, "I2C1_SMBA"),
+ STM32_FUNCTION(10, "LCD_G0"),
+ STM32_FUNCTION(13, "FMC_A5"),
+ STM32_FUNCTION(14, "DCMIPP_D11"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(86, "PF6"),
+ STM32_FUNCTION(0, "GPIOF6"),
+ STM32_FUNCTION(2, "TIM16_CH1"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(8, "UART7_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(12, "ETH2_MII_TX_EN ETH2_RGMII_TX_CTL ETH2_RMII_TX_EN"),
+ STM32_FUNCTION(14, "LCD_R7"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(87, "PF7"),
+ STM32_FUNCTION(0, "GPIOF7"),
+ STM32_FUNCTION(2, "TIM17_CH1"),
+ STM32_FUNCTION(8, "UART7_TX"),
+ STM32_FUNCTION(9, "UART4_CTS"),
+ STM32_FUNCTION(11, "ETH1_RGMII_CLK125"),
+ STM32_FUNCTION(12, "ETH2_MII_TXD0 ETH2_RGMII_TXD0 ETH2_RMII_TXD0"),
+ STM32_FUNCTION(13, "FMC_A18"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(88, "PF8"),
+ STM32_FUNCTION(0, "GPIOF8"),
+ STM32_FUNCTION(2, "TIM16_CH1N"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(7, "SAI1_SCK_B"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(14, "DCMIPP_D15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(89, "PF9"),
+ STM32_FUNCTION(0, "GPIOF9"),
+ STM32_FUNCTION(2, "TIM17_CH1N"),
+ STM32_FUNCTION(3, "TIM1_CH1"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN3"),
+ STM32_FUNCTION(7, "SAI1_D4"),
+ STM32_FUNCTION(8, "UART7_CTS"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(12, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(13, "FMC_A9"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(90, "PF10"),
+ STM32_FUNCTION(0, "GPIOF10"),
+ STM32_FUNCTION(2, "TIM16_BKIN"),
+ STM32_FUNCTION(3, "SAI1_D3"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(8, "USART6_RTS USART6_DE"),
+ STM32_FUNCTION(9, "UART7_RTS UART7_DE"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
+ STM32_FUNCTION(14, "DCMIPP_HSYNC"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(91, "PF11"),
+ STM32_FUNCTION(0, "GPIOF11"),
+ STM32_FUNCTION(2, "USART2_TX"),
+ STM32_FUNCTION(3, "SAI1_D2"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN3"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(13, "ETH2_MII_RX_ER"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(92, "PF12"),
+ STM32_FUNCTION(0, "GPIOF12"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(11, "ETH1_MII_TX_ER"),
+ STM32_FUNCTION(12, "ETH1_RGMII_CLK125"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(93, "PF13"),
+ STM32_FUNCTION(0, "GPIOF13"),
+ STM32_FUNCTION(2, "TIM2_ETR"),
+ STM32_FUNCTION(3, "SAI1_MCLK_B"),
+ STM32_FUNCTION(7, "DFSDM1_DATIN3"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(9, "UART5_RX"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(94, "PF14"),
+ STM32_FUNCTION(0, "GPIOF14"),
+ STM32_FUNCTION(1, "JTCK SWCLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(95, "PF15"),
+ STM32_FUNCTION(0, "GPIOF15"),
+ STM32_FUNCTION(1, "JTMS SWDIO"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(96, "PG0"),
+ STM32_FUNCTION(0, "GPIOG0"),
+ STM32_FUNCTION(10, "FDCAN2_TX"),
+ STM32_FUNCTION(11, "TSC_G4_IO2"),
+ STM32_FUNCTION(13, "FMC_A10"),
+ STM32_FUNCTION(14, "DCMIPP_PIXCLK"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(97, "PG1"),
+ STM32_FUNCTION(0, "GPIOG1"),
+ STM32_FUNCTION(2, "LPTIM1_ETR"),
+ STM32_FUNCTION(3, "TIM4_ETR"),
+ STM32_FUNCTION(4, "SAI2_FS_A"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
+ STM32_FUNCTION(7, "SAI2_D2"),
+ STM32_FUNCTION(10, "FDCAN2_TX"),
+ STM32_FUNCTION(11, "ETH2_MII_TXD2 ETH2_RGMII_TXD2"),
+ STM32_FUNCTION(13, "FMC_NBL0"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(98, "PG2"),
+ STM32_FUNCTION(0, "GPIOG2"),
+ STM32_FUNCTION(2, "MCO2"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(12, "ETH1_MDC"),
+ STM32_FUNCTION(14, "DCMIPP_D1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(99, "PG3"),
+ STM32_FUNCTION(0, "GPIOG3"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(7, "SAI2_SD_B"),
+ STM32_FUNCTION(10, "FDCAN2_RX"),
+ STM32_FUNCTION(11, "ETH2_RGMII_GTX_CLK"),
+ STM32_FUNCTION(12, "ETH1_MDIO"),
+ STM32_FUNCTION(13, "FMC_A13"),
+ STM32_FUNCTION(14, "DCMIPP_D15"),
+ STM32_FUNCTION(15, "DCMIPP_D12"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(100, "PG4"),
+ STM32_FUNCTION(0, "GPIOG4"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(2, "TIM1_BKIN2"),
+ STM32_FUNCTION(5, "DFSDM1_CKIN3"),
+ STM32_FUNCTION(9, "USART3_RX"),
+ STM32_FUNCTION(11, "SDMMC2_D123DIR"),
+ STM32_FUNCTION(12, "LCD_VSYNC"),
+ STM32_FUNCTION(13, "FMC_A14"),
+ STM32_FUNCTION(14, "DCMIPP_D8"),
+ STM32_FUNCTION(15, "DCMIPP_D13"),
+ STM32_FUNCTION(16, "HDP1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(101, "PG5"),
+ STM32_FUNCTION(0, "GPIOG5"),
+ STM32_FUNCTION(2, "TIM17_CH1"),
+ STM32_FUNCTION(11, "ETH2_MDC"),
+ STM32_FUNCTION(12, "LCD_G4"),
+ STM32_FUNCTION(13, "FMC_A15"),
+ STM32_FUNCTION(14, "DCMIPP_VSYNC"),
+ STM32_FUNCTION(15, "DCMIPP_D3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(102, "PG6"),
+ STM32_FUNCTION(0, "GPIOG6"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(2, "TIM17_BKIN"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(4, "SAI2_D1"),
+ STM32_FUNCTION(5, "USART1_RX"),
+ STM32_FUNCTION(7, "SAI2_SD_A"),
+ STM32_FUNCTION(11, "SDMMC2_CMD"),
+ STM32_FUNCTION(12, "LCD_G0"),
+ STM32_FUNCTION(14, "LCD_DE"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "HDP3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(103, "PG7"),
+ STM32_FUNCTION(0, "GPIOG7"),
+ STM32_FUNCTION(1, "TRACED8"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(6, "SPI3_MISO I2S3_SDI"),
+ STM32_FUNCTION(9, "UART7_CTS"),
+ STM32_FUNCTION(11, "SDMMC2_CKIN"),
+ STM32_FUNCTION(12, "LCD_R1"),
+ STM32_FUNCTION(14, "LCD_R5"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(104, "PG8"),
+ STM32_FUNCTION(0, "GPIOG8"),
+ STM32_FUNCTION(2, "TIM2_CH1"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(7, "SAI1_MCLK_B"),
+ STM32_FUNCTION(8, "LCD_B1"),
+ STM32_FUNCTION(9, "USART3_RTS USART3_DE"),
+ STM32_FUNCTION(10, "SPDIFRX_IN2"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(12, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(13, "FMC_NE2"),
+ STM32_FUNCTION(14, "ETH2_CLK"),
+ STM32_FUNCTION(15, "DCMIPP_D6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(105, "PG9"),
+ STM32_FUNCTION(0, "GPIOG9"),
+ STM32_FUNCTION(1, "DBTRGO"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(9, "SPDIFRX_IN3"),
+ STM32_FUNCTION(10, "FDCAN1_RX"),
+ STM32_FUNCTION(11, "FMC_NE2"),
+ STM32_FUNCTION(13, "FMC_NCE"),
+ STM32_FUNCTION(14, "DCMIPP_VSYNC"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(106, "PG10"),
+ STM32_FUNCTION(0, "GPIOG10"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(10, "FDCAN1_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(13, "FMC_NE3"),
+ STM32_FUNCTION(14, "DCMIPP_D2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(107, "PG11"),
+ STM32_FUNCTION(0, "GPIOG11"),
+ STM32_FUNCTION(5, "SAI2_D3"),
+ STM32_FUNCTION(6, "I2S2_MCK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(11, "ETH2_MII_TXD1 ETH2_RGMII_TXD1 ETH2_RMII_TXD1"),
+ STM32_FUNCTION(13, "FMC_A24"),
+ STM32_FUNCTION(14, "DCMIPP_D14"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(108, "PG12"),
+ STM32_FUNCTION(0, "GPIOG12"),
+ STM32_FUNCTION(2, "LPTIM1_IN1"),
+ STM32_FUNCTION(4, "TSC_G5_IO2"),
+ STM32_FUNCTION(5, "SAI2_SCK_A"),
+ STM32_FUNCTION(7, "SAI2_CK2"),
+ STM32_FUNCTION(8, "USART6_RTS USART6_DE"),
+ STM32_FUNCTION(9, "USART3_CTS"),
+ STM32_FUNCTION(11, "ETH2_PHY_INTN"),
+ STM32_FUNCTION(12, "ETH1_PHY_INTN"),
+ STM32_FUNCTION(13, "ETH2_MII_RX_DV ETH2_RGMII_RX_CTL ETH2_RMII_CRS_DV"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(109, "PG13"),
+ STM32_FUNCTION(0, "GPIOG13"),
+ STM32_FUNCTION(2, "LPTIM1_OUT"),
+ STM32_FUNCTION(8, "USART6_CTS USART6_NSS"),
+ STM32_FUNCTION(12, "ETH1_MII_TXD0 ETH1_RGMII_TXD0 ETH1_RMII_TXD0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(110, "PG14"),
+ STM32_FUNCTION(0, "GPIOG14"),
+ STM32_FUNCTION(2, "LPTIM1_ETR"),
+ STM32_FUNCTION(7, "SAI2_D1"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(11, "SAI2_SD_A"),
+ STM32_FUNCTION(12, "ETH1_MII_TXD1 ETH1_RGMII_TXD1 ETH1_RMII_TXD1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(111, "PG15"),
+ STM32_FUNCTION(0, "GPIOG15"),
+ STM32_FUNCTION(8, "USART6_CTS USART6_NSS"),
+ STM32_FUNCTION(9, "UART7_CTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(11, "ETH2_PHY_INTN"),
+ STM32_FUNCTION(12, "LCD_B4"),
+ STM32_FUNCTION(14, "DCMIPP_D10"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(112, "PH0"),
+ STM32_FUNCTION(0, "GPIOH0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(113, "PH1"),
+ STM32_FUNCTION(0, "GPIOH1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(114, "PH2"),
+ STM32_FUNCTION(0, "GPIOH2"),
+ STM32_FUNCTION(2, "LPTIM1_IN2"),
+ STM32_FUNCTION(4, "TSC_G4_IO3"),
+ STM32_FUNCTION(7, "DCMIPP_D9"),
+ STM32_FUNCTION(8, "LCD_G1"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(11, "ETH2_MII_CRS"),
+ STM32_FUNCTION(12, "ETH1_MII_CRS"),
+ STM32_FUNCTION(13, "FMC_NE4"),
+ STM32_FUNCTION(14, "ETH2_RGMII_CLK125"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(115, "PH3"),
+ STM32_FUNCTION(0, "GPIOH3"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(11, "ETH1_MII_COL"),
+ STM32_FUNCTION(12, "LCD_R5"),
+ STM32_FUNCTION(13, "ETH2_MII_COL"),
+ STM32_FUNCTION(14, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(116, "PH4"),
+ STM32_FUNCTION(0, "GPIOH4"),
+ STM32_FUNCTION(1, "JTDI"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(117, "PH5"),
+ STM32_FUNCTION(0, "GPIOH5"),
+ STM32_FUNCTION(1, "JTDO"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(118, "PH6"),
+ STM32_FUNCTION(0, "GPIOH6"),
+ STM32_FUNCTION(3, "TIM12_CH1"),
+ STM32_FUNCTION(4, "USART2_CK"),
+ STM32_FUNCTION(5, "I2C5_SDA"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(11, "ETH1_PHY_INTN"),
+ STM32_FUNCTION(12, "ETH1_MII_RX_ER"),
+ STM32_FUNCTION(13, "ETH2_MII_RXD2 ETH2_RGMII_RXD2"),
+ STM32_FUNCTION(14, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(119, "PH7"),
+ STM32_FUNCTION(0, "GPIOH7"),
+ STM32_FUNCTION(3, "SAI2_FS_B"),
+ STM32_FUNCTION(6, "I2C3_SDA"),
+ STM32_FUNCTION(7, "SPI5_SCK"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(11, "ETH2_MII_TX_CLK"),
+ STM32_FUNCTION(12, "ETH1_MII_TX_CLK"),
+ STM32_FUNCTION(14, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(120, "PH8"),
+ STM32_FUNCTION(0, "GPIOH8"),
+ STM32_FUNCTION(1, "TRACED9"),
+ STM32_FUNCTION(3, "TIM5_ETR"),
+ STM32_FUNCTION(4, "USART2_RX"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(12, "LCD_R6"),
+ STM32_FUNCTION(13, "FMC_A8"),
+ STM32_FUNCTION(14, "DCMIPP_HSYNC"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "HDP2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(121, "PH9"),
+ STM32_FUNCTION(0, "GPIOH9"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(3, "TIM12_CH2"),
+ STM32_FUNCTION(4, "TSC_SYNC"),
+ STM32_FUNCTION(6, "SPI4_SCK I2S4_CK"),
+ STM32_FUNCTION(7, "DCMIPP_D13"),
+ STM32_FUNCTION(10, "LCD_B5"),
+ STM32_FUNCTION(12, "LCD_DE"),
+ STM32_FUNCTION(13, "FMC_A20"),
+ STM32_FUNCTION(14, "DCMIPP_D9"),
+ STM32_FUNCTION(15, "DCMIPP_D8"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(122, "PH10"),
+ STM32_FUNCTION(0, "GPIOH10"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(4, "SAI2_D3"),
+ STM32_FUNCTION(5, "DFSDM1_DATIN2"),
+ STM32_FUNCTION(6, "I2S3_MCK"),
+ STM32_FUNCTION(7, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(8, "USART3_CTS USART3_NSS"),
+ STM32_FUNCTION(9, "SDMMC1_D4"),
+ STM32_FUNCTION(14, "LCD_HSYNC"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "HDP0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(123, "PH11"),
+ STM32_FUNCTION(0, "GPIOH11"),
+ STM32_FUNCTION(2, "SPI5_NSS"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(4, "SAI2_SD_A"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(7, "I2C4_SCL"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(12, "ETH2_MII_RX_CLK ETH2_RGMII_RX_CLK ETH2_RMII_REF_CLK"),
+ STM32_FUNCTION(13, "FMC_A12"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(124, "PH12"),
+ STM32_FUNCTION(0, "GPIOH12"),
+ STM32_FUNCTION(2, "USART2_TX"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(4, "DFSDM1_CKIN1"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(7, "SAI1_SCK_A"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(11, "SAI1_CK2"),
+ STM32_FUNCTION(12, "ETH1_MII_CRS"),
+ STM32_FUNCTION(13, "FMC_A6"),
+ STM32_FUNCTION(14, "DCMIPP_D3"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(125, "PH13"),
+ STM32_FUNCTION(0, "GPIOH13"),
+ STM32_FUNCTION(1, "TRACED15"),
+ STM32_FUNCTION(3, "USART2_CK"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(5, "I2C5_SCL"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(14, "LCD_G3"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(126, "PH14"),
+ STM32_FUNCTION(0, "GPIOH14"),
+ STM32_FUNCTION(4, "DFSDM1_DATIN2"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(7, "DCMIPP_D8"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(12, "LCD_B4"),
+ STM32_FUNCTION(14, "DCMIPP_D2"),
+ STM32_FUNCTION(15, "DCMIPP_PIXCLK"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(128, "PI0"),
+ STM32_FUNCTION(0, "GPIOI0"),
+ STM32_FUNCTION(9, "SPDIFRX_IN0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(129, "PI1"),
+ STM32_FUNCTION(0, "GPIOI1"),
+ STM32_FUNCTION(9, "SPDIFRX_IN1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(130, "PI2"),
+ STM32_FUNCTION(0, "GPIOI2"),
+ STM32_FUNCTION(9, "SPDIFRX_IN2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(131, "PI3"),
+ STM32_FUNCTION(0, "GPIOI3"),
+ STM32_FUNCTION(9, "SPDIFRX_IN3"),
+ STM32_FUNCTION(12, "ETH1_MII_RX_ER"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(132, "PI4"),
+ STM32_FUNCTION(0, "GPIOI4"),
+ STM32_FUNCTION(1, "BOOT0"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(133, "PI5"),
+ STM32_FUNCTION(0, "GPIOI5"),
+ STM32_FUNCTION(1, "BOOT1"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(134, "PI6"),
+ STM32_FUNCTION(0, "GPIOI6"),
+ STM32_FUNCTION(1, "BOOT2"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(135, "PI7"),
+ STM32_FUNCTION(0, "GPIOI7"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+};
+
+static struct stm32_pinctrl_match_data stm32mp135_match_data = {
+ .pins = stm32mp135_pins,
+ .npins = ARRAY_SIZE(stm32mp135_pins),
+};
+
+static const struct of_device_id stm32mp135_pctrl_match[] = {
+ {
+ .compatible = "st,stm32mp135-pinctrl",
+ .data = &stm32mp135_match_data,
+ },
+ { }
+};
+
+static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+};
+
+static struct platform_driver stm32mp135_pinctrl_driver = {
+ .probe = stm32_pctl_probe,
+ .driver = {
+ .name = "stm32mp135-pinctrl",
+ .of_match_table = stm32mp135_pctrl_match,
+ .pm = &stm32_pinctrl_dev_pm_ops,
+ },
+};
+
+static int __init stm32mp135_pinctrl_init(void)
+{
+ return platform_driver_register(&stm32mp135_pinctrl_driver);
+}
+arch_initcall(stm32mp135_pinctrl_init);
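
The STM32_PIN()/STM32_FUNCTION() entries in the table above are initializer macros shared by the stm32 pinctrl drivers. As a rough, self-contained sketch of the descriptor shape they build — the struct and field names below are illustrative assumptions, not the exact kernel definitions:

#include <stdio.h>

/* Illustrative stand-ins for the pinctrl-stm32 descriptor types. */
struct demo_function {
	unsigned int num;		/* alternate function number, 0..17 */
	const char *name;		/* signal name, e.g. "USART1_TX" */
};

struct demo_pin {
	unsigned int pin;		/* global pin index, e.g. 9 for PA9 */
	const char *label;
	const struct demo_function *functions;
};

#define DEMO_FUNCTION(n, s)	{ .num = (n), .name = (s) }

/* Same shape as the PA9 entry above, trimmed to three functions. */
static const struct demo_function pa9_functions[] = {
	DEMO_FUNCTION(0, "GPIOA9"),
	DEMO_FUNCTION(8, "USART1_TX"),
	DEMO_FUNCTION(17, "ANALOG"),
	{ 0, NULL },			/* sentinel */
};

static const struct demo_pin pa9 = { 9, "PA9", pa9_functions };

int main(void)
{
	printf("%s: AF%u -> %s\n", pa9.label, pa9.functions[1].num,
	       pa9.functions[1].name);
	return 0;
}

Note the pattern visible throughout the table: function 0 is always the plain GPIO mode and function 17 the analog state, which is why every pin entry ends with an "ANALOG" line.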
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 41baccba033f..f901d2e43166 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index aa7f7aa77297..a7404d69b2d3 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
msg->insize = sizeof(struct ec_response_get_protocol_info);
ret = send_command(ec_dev, msg);
+	/*
+	 * Send the command again if a timeout occurred.
+	 * The Fingerprint MCU (FPMCU) is restarted during system boot, which
+	 * introduces a small window in which the FPMCU won't respond to any
+	 * messages sent by the kernel. There is no need to wait before the
+	 * next attempt because we already waited at least EC_MSG_DEADLINE_MS.
+	 */
+ if (ret == -ETIMEDOUT)
+ ret = send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
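
The retry added above is deliberately bounded: one extra attempt, no sleep, because the failed attempt already consumed the full message deadline. A minimal standalone sketch of that pattern (names hypothetical, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Hypothetical transport call: returns 0 on success, -errno on failure.
 * The first attempt simulates the FPMCU boot window by timing out. */
static int send_once(int attempt)
{
	return attempt == 0 ? -ETIMEDOUT : 0;
}

/* Retry exactly once, and only on timeout; other errors still fail fast. */
static int send_with_retry(void)
{
	int ret = send_once(0);

	if (ret == -ETIMEDOUT)
		ret = send_once(1);
	return ret;
}

int main(void)
{
	printf("result: %d\n", send_with_retry());
	return 0;
}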
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 8921f24e83ba..98e37080f760 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -17,6 +17,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
+#include "cros_ec_trace.h"
+
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
@@ -291,6 +293,7 @@ cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
state->median_m = 0;
state->median_error = 0;
}
+ trace_cros_ec_sensorhub_filter(state, dx, dy);
}
/**
@@ -427,6 +430,11 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
if (new_timestamp - *current_timestamp > 0)
*current_timestamp = new_timestamp;
}
+ trace_cros_ec_sensorhub_timestamp(in->timestamp,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
}
if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
@@ -460,6 +468,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
/* Regular sample */
out->sensor_id = in->sensor_num;
+ trace_cros_ec_sensorhub_data(in->sensor_num,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
+
if (*current_timestamp - now > 0) {
/*
* This fix is needed to overcome the timestamp filter putting
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index f744b21bc655..7e7cfc98657a 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/tracepoint.h>
@@ -70,6 +71,99 @@ TRACE_EVENT(cros_ec_request_done,
__entry->retval)
);
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+	TP_printk("dx: %12lld, dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
#endif /* _CROS_EC_TRACE_H_ */
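
Each TRACE_EVENT() above expands into a trace_<name>() helper that the sensorhub ring code calls, as seen in the previous hunk: TP_STRUCT__entry declares the record, TP_fast_assign fills it at trigger time, and TP_printk formats it only when the trace buffer is read. A rough userspace analogue of that capture/format split, purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Capture stage: store raw fields cheaply, like TP_fast_assign. */
struct filter_event {
	int64_t dx, dy, median_m, history_len;
};

/* Format stage: render the record later, like TP_printk. */
static void print_event(const struct filter_event *e)
{
	printf("dx: %12lld, dy: %12lld median_m: %12lld len: %lld\n",
	       (long long)e->dx, (long long)e->dy,
	       (long long)e->median_m, (long long)e->history_len);
}

int main(void)
{
	struct filter_event e = { .dx = 5, .dy = 7, .median_m = 1,
				  .history_len = 3 };
	print_event(&e);
	return 0;
}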
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 27c068c4c38d..262a891eded3 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -1054,24 +1054,6 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
-/* Check the EC feature flags to see if TYPEC_* features are supported. */
-static int cros_typec_feature_supported(struct cros_typec_data *typec, enum ec_feature_code feature)
-{
- struct ec_response_get_features resp = {};
- int ret;
-
- ret = cros_typec_ec_command(typec, 0, EC_CMD_GET_FEATURES, NULL, 0,
- &resp, sizeof(resp));
- if (ret < 0) {
- dev_warn(typec->dev,
- "Failed to get features, assuming typec feature=%d unsupported.\n",
- feature);
- return 0;
- }
-
- return resp.flags[feature / 32] & EC_FEATURE_MASK_1(feature);
-}
-
static void cros_typec_port_work(struct work_struct *work)
{
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
@@ -1113,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, cros_typec_of_match);
static int cros_typec_probe(struct platform_device *pdev)
{
+ struct cros_ec_dev *ec_dev = NULL;
struct device *dev = &pdev->dev;
struct cros_typec_data *typec;
struct ec_response_usb_pd_ports resp;
@@ -1132,10 +1115,10 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
- typec->typec_cmd_supported = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_CMD);
- typec->needs_mux_ack = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+ ec_dev = dev_get_drvdata(&typec->ec->ec->dev);
+ typec->typec_cmd_supported = !!cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
+ typec->needs_mux_ack = !!cros_ec_check_features(ec_dev,
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
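
The deleted helper open-coded the EC feature query; the probe path now defers to cros_ec_check_features(), which handles the query centrally. Either way the test reduces to a word/bit lookup in an array of 32-bit flag words; a minimal sketch of that indexing (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical: test bit 'feature' in an array of 32-bit flag words,
 * mirroring resp.flags[feature / 32] & BIT(feature % 32). */
static bool feature_supported(const uint32_t *flags, unsigned int feature)
{
	return flags[feature / 32] & (1u << (feature % 32));
}

int main(void)
{
	uint32_t flags[2] = { 0, 1u << 9 };	/* only feature 41 set */

	printf("feature 41: %d\n", feature_supported(flags, 41));
	printf("feature  3: %d\n", feature_supported(flags, 3));
	return 0;
}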
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 35883984251f..04bc3b50aa7a 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -79,7 +79,8 @@
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
/**
- * Structure to hold attribute and block info for each sysfs entry
+ * struct mlxbf_pmc_attribute - Structure to hold attribute and block info
+ * for each sysfs entry
* @dev_attr: Device attribute struct
* @index: index to identify counter number within a block
* @nr: block number to which the sysfs belongs
@@ -91,7 +92,7 @@ struct mlxbf_pmc_attribute {
};
/**
- * Structure to hold info for each HW block
+ * struct mlxbf_pmc_block_info - Structure to hold info for each HW block
*
* @mmio_base: The VA at which the PMC block is mapped
* @blk_size: Size of each mapped region
@@ -102,7 +103,7 @@ struct mlxbf_pmc_attribute {
* @attr_event_list: Attributes for "event_list" sysfs files
* @attr_enable: Attributes for "enable" sysfs files
* @block_attr: All attributes needed for the block
- * @blcok_attr_grp: Attribute group for the block
+ * @block_attr_grp: Attribute group for the block
*/
struct mlxbf_pmc_block_info {
void __iomem *mmio_base;
@@ -118,7 +119,7 @@ struct mlxbf_pmc_block_info {
};
/**
- * Structure to hold PMC context info
+ * struct mlxbf_pmc_context - Structure to hold PMC context info
*
* @pdev: The kernel structure representing the device
* @total_blocks: Total number of blocks
@@ -127,7 +128,7 @@ struct mlxbf_pmc_block_info {
* @block_name: Block name
* @block: Block info
* @groups: Attribute groups from each block
- * @sv_sreg_support: Whether SMCs are used to access performance registers
+ * @svc_sreg_support: Whether SMCs are used to access performance registers
* @sreg_tbl_perf: Secure register access table number
* @event_set: Event set to use
*/
@@ -145,7 +146,7 @@ struct mlxbf_pmc_context {
};
/**
- * Structure to hold supported events for each block
+ * struct mlxbf_pmc_events - Structure to hold supported events for each block
* @evt_num: Event number used to program counters
* @evt_name: Name of the event
*/
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
index c8498c41e758..c0d550eda5cd 100644
--- a/drivers/platform/surface/aggregator/Makefile
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -6,12 +6,9 @@ CFLAGS_core.o = -I$(src)
obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
-surface_aggregator-objs := core.o
-surface_aggregator-objs += ssh_parser.o
-surface_aggregator-objs += ssh_packet_layer.o
-surface_aggregator-objs += ssh_request_layer.o
-surface_aggregator-objs += controller.o
-
-ifeq ($(CONFIG_SURFACE_AGGREGATOR_BUS),y)
-surface_aggregator-objs += bus.o
-endif
+surface_aggregator-y := core.o
+surface_aggregator-y += ssh_parser.o
+surface_aggregator-y += ssh_packet_layer.o
+surface_aggregator-y += ssh_request_layer.o
+surface_aggregator-$(CONFIG_SURFACE_AGGREGATOR_BUS) += bus.o
+surface_aggregator-y += controller.o
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index dea82aa1abd4..90c1568ea4e0 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -384,13 +384,7 @@ mshw0011_space_handler(u32 function, acpi_physical_address command,
if (ACPI_FAILURE(ret))
return ret;
- if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- ret = AE_BAD_PARAMETER;
- goto err;
- }
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) {
ret = AE_BAD_PARAMETER;
goto err;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d12db6c316ea..e21ea3d23e6f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -77,28 +77,6 @@ config UV_SYSFS
To compile this driver as a module, choose M here: the module will
be called uv_sysfs.
-config INTEL_WMI_SBL_FW_UPDATE
- tristate "Intel WMI Slim Bootloader firmware update signaling driver"
- depends on ACPI_WMI
- help
- Say Y here if you want to be able to use the WMI interface to signal
- Slim Bootloader to trigger update on next reboot.
-
- To compile this driver as a module, choose M here: the module will
- be called intel-wmi-sbl-fw-update.
-
-config INTEL_WMI_THUNDERBOLT
- tristate "Intel WMI thunderbolt force power driver"
- depends on ACPI_WMI
- help
- Say Y here if you want to be able to use the WMI interface on select
- systems to force the power control of Intel Thunderbolt controllers.
- This is useful for updating the firmware when devices are not plugged
- into the controller.
-
- To compile this driver as a module, choose M here: the module will
- be called intel-wmi-thunderbolt.
-
config MXM_WMI
tristate "WMI support for MXM Laptop Graphics"
depends on ACPI_WMI
@@ -281,6 +259,7 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
+ select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
Asus Notebooks).
@@ -302,6 +281,19 @@ config ASUS_NB_WMI
If you have an ACPI-WMI compatible Asus Notebook, say Y or M
here.
+config MERAKI_MX100
+ tristate "Cisco Meraki MX100 Platform Driver"
+ depends on GPIOLIB
+ depends on GPIO_ICH
+ depends on LEDS_CLASS
+ select LEDS_GPIO
+ help
+ This driver provides support for the front button and LEDs on
+ the Cisco Meraki MX100 (Tinkerbell) 1U appliance.
+
+ To compile this driver as a module, choose M here: the module
+ will be called meraki-mx100.
+
config EEEPC_LAPTOP
tristate "Eee PC Hotkey Driver"
depends on ACPI
@@ -654,105 +646,6 @@ config THINKPAD_LMI
source "drivers/platform/x86/intel/Kconfig"
-config INTEL_ATOMISP2_LED
- tristate "Intel AtomISP2 camera LED driver"
- depends on GPIOLIB && LEDS_GPIO
- help
- Many Bay Trail and Cherry Trail devices come with a camera attached
- to Intel's Image Signal Processor. Linux currently does not have a
- driver for these, so they do not work as a camera. Some of these
- camera's have a LED which is controlled through a GPIO.
-
- Some of these devices have a firmware issue where the LED gets turned
- on at boot. This driver will turn the LED off at boot and also allows
- controlling the LED (repurposing it) through the sysfs LED interface.
-
- Which GPIO is attached to the LED is usually not described in the
- ACPI tables, so this driver contains per-system info about the GPIO
- inside the driver, this means that this driver only works on systems
- the driver knows about.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_atomisp2_led.
-
-config INTEL_ATOMISP2_PM
- tristate "Intel AtomISP2 dummy / power-management driver"
- depends on PCI && IOSF_MBI && PM
- depends on !INTEL_ATOMISP
- help
- Power-management driver for Intel's Image Signal Processor found on
- Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
- is to turn the ISP off (put it in D3) to save power and to allow
- entering of S0ix modes.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_atomisp2_pm.
-
-config INTEL_HID_EVENT
- tristate "INTEL HID Event"
- depends on ACPI
- depends on INPUT
- depends on I2C
- select INPUT_SPARSEKMAP
- help
- This driver provides support for the Intel HID Event hotkey interface.
- Some laptops require this driver for hotkey support.
-
- To compile this driver as a module, choose M here: the module will
- be called intel_hid.
-
-config INTEL_INT0002_VGPIO
- tristate "Intel ACPI INT0002 Virtual GPIO driver"
- depends on GPIOLIB && ACPI && PM_SLEEP
- select GPIOLIB_IRQCHIP
- help
- Some peripherals on Bay Trail and Cherry Trail platforms signal a
- Power Management Event (PME) to the Power Management Controller (PMC)
- to wakeup the system. When this happens software needs to explicitly
- clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
- IRQ storm on IRQ 9.
-
- This is modelled in ACPI through the INT0002 ACPI device, which is
- called a "Virtual GPIO controller" in ACPI because it defines the
- event handler to call when the PME triggers through _AEI and _L02
- methods as would be done for a real GPIO interrupt in ACPI.
-
- To compile this driver as a module, choose M here: the module will
- be called intel_int0002_vgpio.
-
-config INTEL_MENLOW
- tristate "Thermal Management driver for Intel menlow platform"
- depends on ACPI_THERMAL
- select THERMAL
- help
- ACPI thermal management enhancement driver on
- Intel Menlow platform.
-
- If unsure, say N.
-
-config INTEL_OAKTRAIL
- tristate "Intel Oaktrail Platform Extras"
- depends on ACPI
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
- help
- Intel Oaktrail platform need this driver to provide interfaces to
- enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
- here; it will only load on supported platforms.
-
-config INTEL_VBTN
- tristate "INTEL VIRTUAL BUTTON"
- depends on ACPI
- depends on INPUT
- depends on I2C
- select INPUT_SPARSEKMAP
- help
- This driver provides support for the Intel Virtual Button interface.
- Some laptops require this driver for power button support.
-
- To compile this driver as a module, choose M here: the module will
- be called intel_vbtn.
-
config MSI_LAPTOP
tristate "MSI Laptop Extras"
depends on ACPI
@@ -1106,150 +999,6 @@ config INTEL_IPS
functionality. If in doubt, say Y here; it will only load on
supported platforms.
-config INTEL_RST
- tristate "Intel Rapid Start Technology Driver"
- depends on ACPI
- help
- This driver provides support for modifying parameters on systems
- equipped with Intel's Rapid Start Technology. When put in an ACPI
- sleep state, these devices will wake after either a configured
- timeout or when the system battery reaches a critical state,
- automatically copying memory contents to disk. On resume, the
- firmware will copy the memory contents back to RAM and resume the OS
- as usual.
-
-config INTEL_SMARTCONNECT
- tristate "Intel Smart Connect disabling driver"
- depends on ACPI
- help
- Intel Smart Connect is a technology intended to permit devices to
- update state by resuming for a short period of time at regular
- intervals. If a user enables this functionality under Windows and
- then reboots into Linux, the system may remain configured to resume
- on suspend. In the absence of any userspace to support it, the system
- will then remain awake until something triggers another suspend.
-
- This driver checks to determine whether the device has Intel Smart
- Connect enabled, and if so disables it.
-
-source "drivers/platform/x86/intel_speed_select_if/Kconfig"
-
-config INTEL_TURBO_MAX_3
- bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
- depends on X86_64 && SCHED_MC_PRIO
- help
- This driver reads maximum performance ratio of each CPU and set up
- the scheduler priority metrics. In this way scheduler can prefer
- CPU with higher performance to schedule tasks.
- This driver is only required when the system is not using Hardware
- P-States (HWP). In HWP mode, priority can be read from ACPI tables.
-
-config INTEL_UNCORE_FREQ_CONTROL
- tristate "Intel Uncore frequency control driver"
- depends on X86_64
- help
- This driver allows control of uncore frequency limits on
- supported server platforms.
- Uncore frequency controls RING/LLC (last-level cache) clocks.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-uncore-frequency.
-
-config INTEL_BXTWC_PMIC_TMU
- tristate "Intel BXT Whiskey Cove TMU Driver"
- depends on REGMAP
- depends on MFD_INTEL_PMC_BXT
- depends on INTEL_SOC_PMIC_BXTWC
- help
- Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature.
- This driver enables the alarm wakeup functionality in the TMU unit
- of Whiskey Cove PMIC.
-
-config INTEL_CHTDC_TI_PWRBTN
- tristate "Intel Cherry Trail Dollar Cove TI power button driver"
- depends on INTEL_SOC_PMIC_CHTDC_TI
- depends on INPUT
- help
- This option adds a power button driver driver for Dollar Cove TI
- PMIC on Intel Cherry Trail devices.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_chtdc_ti_pwrbtn.
-
-config INTEL_MRFLD_PWRBTN
- tristate "Intel Merrifield Basin Cove power button driver"
- depends on INTEL_SOC_PMIC_MRFLD
- depends on INPUT
- help
- This option adds a power button driver for Basin Cove PMIC
- on Intel Merrifield devices.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_mrfld_pwrbtn.
-
-config INTEL_PMC_CORE
- tristate "Intel PMC Core driver"
- depends on PCI
- depends on ACPI
- help
- The Intel Platform Controller Hub for Intel Core SoCs provides access
- to Power Management Controller registers via various interfaces. This
- driver can utilize debugging capabilities and supported features as
- exposed by the Power Management Controller. It also may perform some
- tasks in the PMC in order to enable transition into the SLPS0 state.
- It should be selected on all Intel platforms supported by the driver.
-
- Supported features:
- - SLP_S0_RESIDENCY counter
- - PCH IP Power Gating status
- - LTR Ignore / LTR Show
- - MPHY/PLL gating status (Sunrisepoint PCH only)
- - SLPS0 Debug registers (Cannonlake/Icelake PCH)
- - Low Power Mode registers (Tigerlake and beyond)
- - PMC quirks as needed to enable SLPS0/S0ix
-
-config INTEL_PMT_CLASS
- tristate
- help
- The Intel Platform Monitoring Technology (PMT) class driver provides
- the basic sysfs interface and file hierarchy used by PMT devices.
-
- For more information, see:
- <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
-
- To compile this driver as a module, choose M here: the module
- will be called intel_pmt_class.
-
-config INTEL_PMT_TELEMETRY
- tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
- depends on MFD_INTEL_PMT
- select INTEL_PMT_CLASS
- help
- The Intel Platform Monitory Technology (PMT) Telemetry driver provides
- access to hardware telemetry metrics on devices that support the
- feature.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_pmt_telemetry.
-
-config INTEL_PMT_CRASHLOG
- tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
- depends on MFD_INTEL_PMT
- select INTEL_PMT_CLASS
- help
- The Intel Platform Monitoring Technology (PMT) crashlog driver provides
- access to hardware crashlog capabilities on devices that support the
- feature.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_pmt_crashlog.
-
-config INTEL_PUNIT_IPC
- tristate "Intel P-Unit IPC Driver"
- help
- This driver provides support for Intel P-Unit Mailbox IPC mechanism,
- which is used to bridge the communications between kernel and P-Unit.
-
config INTEL_SCU_IPC
bool
@@ -1297,18 +1046,6 @@ config INTEL_SCU_IPC_UTIL
low level access for debug work and updating the firmware. Say
N unless you will be doing this on an Intel MID platform.
-config INTEL_TELEMETRY
- tristate "Intel SoC Telemetry Driver"
- depends on X86_64
- depends on MFD_INTEL_PMC_BXT
- depends on INTEL_PUNIT_IPC
- help
- This driver provides interfaces to configure and use
- telemetry for INTEL SoC from APL onwards. It is also
- used to get various SoC events and parameters
- directly via debugfs files. Various tools may use
- this interface for SoC state monitoring.
-
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 7ee369aab10d..69690e26bb6d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -10,8 +10,6 @@ obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
-obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
-obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
@@ -39,6 +37,9 @@ obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
+# Cisco/Meraki
+obj-$(CONFIG_MERAKI_MX100) += meraki-mx100.o
+
# Dell
obj-$(CONFIG_X86_PLATFORM_DRIVERS_DELL) += dell/
@@ -68,14 +69,6 @@ obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
# Intel
obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL) += intel/
-obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
-obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
-obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
-obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
-obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
-obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
-obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
-
# MSI
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
@@ -118,27 +111,11 @@ obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
-obj-$(CONFIG_INTEL_RST) += intel-rst.o
-obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
-obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
-obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
-obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
# Intel PMIC / PMC / P-Unit devices
-obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
-obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
-obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
-obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o
-obj-$(CONFIG_INTEL_PMT_CLASS) += intel_pmt_class.o
-obj-$(CONFIG_INTEL_PMT_TELEMETRY) += intel_pmt_telemetry.o
-obj-$(CONFIG_INTEL_PMT_CRASHLOG) += intel_pmt_crashlog.o
-obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
- intel_telemetry_pltdrv.o \
- intel_telemetry_debugfs.o
obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 85db9403cc14..694b45ed06a2 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -60,6 +60,11 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_GET_THREEG_METHODID 10
#define ACER_WMID_SET_THREEG_METHODID 11
+#define ACER_WMID_SET_GAMING_LED_METHODID 2
+#define ACER_WMID_GET_GAMING_LED_METHODID 4
+#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
+#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+
/*
* Acer ACPI method GUIDs
*/
@@ -68,6 +73,7 @@ MODULE_LICENSE("GPL");
#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A"
#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
+#define WMID_GUID4 "7A4DDFE7-5B5D-40B4-8595-4408E0CC7F56"
/*
* Acer ACPI event GUIDs
@@ -81,6 +87,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
+ WMID_GAMING_TURBO_KEY_EVENT = 0x7,
};
static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -215,6 +222,9 @@ struct hotkey_function_type_aa {
#define ACER_CAP_THREEG BIT(4)
#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
#define ACER_CAP_KBD_DOCK BIT(6)
+#define ACER_CAP_TURBO_OC BIT(7)
+#define ACER_CAP_TURBO_LED BIT(8)
+#define ACER_CAP_TURBO_FAN BIT(9)
/*
* Interface type flags
@@ -301,6 +311,9 @@ struct quirk_entry {
u8 mailled;
s8 brightness;
u8 bluetooth;
+ u8 turbo;
+ u8 cpu_fans;
+ u8 gpu_fans;
};
static struct quirk_entry *quirks;
@@ -312,6 +325,10 @@ static void __init set_quirks(void)
if (quirks->brightness)
interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ if (quirks->turbo)
+ interface->capability |= ACER_CAP_TURBO_OC | ACER_CAP_TURBO_LED
+ | ACER_CAP_TURBO_FAN;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -340,6 +357,12 @@ static struct quirk_entry quirk_acer_travelmate_2490 = {
.mailled = 1,
};
+static struct quirk_entry quirk_acer_predator_ph315_53 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+};
+
/* This AMW0 laptop has no bluetooth */
static struct quirk_entry quirk_medion_md_98300 = {
.wireless = 1,
@@ -508,6 +531,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_travelmate_2490,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH315-53",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH315-53"),
+ },
+ .driver_data = &quirk_acer_predator_ph315_53,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -1345,6 +1377,114 @@ static struct wmi_interface wmid_v2_interface = {
};
/*
+ * WMID Gaming interface
+ */
+
+static acpi_status
+WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
+{
+ struct acpi_buffer input = { (acpi_size) sizeof(u64), (void *)(&in) };
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ u64 tmp = 0; /* wide enough for 8-byte buffer results */
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
+
+ if (ACPI_FAILURE(status))
+ return status;
+ obj = (union acpi_object *) result.pointer;
+
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length == sizeof(u32))
+ tmp = *((u32 *) obj->buffer.pointer);
+ else if (obj->buffer.length == sizeof(u64))
+ tmp = *((u64 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u64) obj->integer.value;
+ }
+ }
+
+ if (out)
+ *out = tmp;
+
+ kfree(result.pointer);
+
+ return status;
+}
+
+static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
+{
+ u32 method_id = 0;
+
+ if (!(interface->capability & cap))
+ return AE_BAD_PARAMETER;
+
+ switch (cap) {
+ case ACER_CAP_TURBO_LED:
+ method_id = ACER_WMID_SET_GAMING_LED_METHODID;
+ break;
+ case ACER_CAP_TURBO_FAN:
+ method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
+ break;
+ case ACER_CAP_TURBO_OC:
+ method_id = ACER_WMID_SET_GAMING_MISC_SETTING_METHODID;
+ break;
+ default:
+ return AE_BAD_PARAMETER;
+ }
+
+ return WMI_gaming_execute_u64(method_id, value, NULL);
+}
+
+static acpi_status WMID_gaming_get_u64(u64 *value, u32 cap)
+{
+ acpi_status status;
+ u64 result;
+ u64 input;
+ u32 method_id;
+
+ if (!(interface->capability & cap))
+ return AE_BAD_PARAMETER;
+
+ switch (cap) {
+ case ACER_CAP_TURBO_LED:
+ method_id = ACER_WMID_GET_GAMING_LED_METHODID;
+ input = 0x1;
+ break;
+ default:
+ return AE_BAD_PARAMETER;
+ }
+ status = WMI_gaming_execute_u64(method_id, input, &result);
+ if (ACPI_SUCCESS(status))
+ *value = (u64) result;
+
+ return status;
+}
+
+static void WMID_gaming_set_fan_mode(u8 fan_mode)
+{
+ /* fan_mode = 1 is used for auto, fan_mode = 2 is used for turbo */
+ u64 gpu_fan_config1 = 0, gpu_fan_config2 = 0;
+ int i;
+
+ if (quirks->cpu_fans > 0)
+ gpu_fan_config2 |= 1;
+ for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
+ gpu_fan_config2 |= 1 << (i + 1);
+ for (i = 0; i < quirks->gpu_fans; ++i)
+ gpu_fan_config2 |= 1 << (i + 3);
+ if (quirks->cpu_fans > 0)
+ gpu_fan_config1 |= fan_mode;
+ for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
+ gpu_fan_config1 |= fan_mode << (2 * i + 2);
+ for (i = 0; i < quirks->gpu_fans; ++i)
+ gpu_fan_config1 |= fan_mode << (2 * i + 6);
+ WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
+}
+
+/*
* Generic Device (interface-independent)
*/
@@ -1576,6 +1716,41 @@ static int acer_gsensor_event(void)
}
/*
+ * Predator series turbo button
+ */
+static int acer_toggle_turbo(void)
+{
+ u64 turbo_led_state;
+
+ /* Get current state from turbo button */
+ if (ACPI_FAILURE(WMID_gaming_get_u64(&turbo_led_state, ACER_CAP_TURBO_LED)))
+ return -1;
+
+ if (turbo_led_state) {
+ /* Turn off turbo led */
+ WMID_gaming_set_u64(0x1, ACER_CAP_TURBO_LED);
+
+ /* Set FAN mode to auto */
+ WMID_gaming_set_fan_mode(0x1);
+
+ /* Set OC to normal */
+ WMID_gaming_set_u64(0x5, ACER_CAP_TURBO_OC);
+ WMID_gaming_set_u64(0x7, ACER_CAP_TURBO_OC);
+ } else {
+ /* Turn on turbo led */
+ WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
+
+ /* Set FAN mode to turbo */
+ WMID_gaming_set_fan_mode(0x2);
+
+ /* Set OC to turbo mode */
+ WMID_gaming_set_u64(0x205, ACER_CAP_TURBO_OC);
+ WMID_gaming_set_u64(0x207, ACER_CAP_TURBO_OC);
+ }
+ return turbo_led_state;
+}
+
+/*
* Switch series keyboard dock status
*/
static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state)
@@ -1872,6 +2047,10 @@ static void acer_wmi_notify(u32 value, void *context)
acer_gsensor_event();
acer_kbd_dock_event(&return_value);
break;
+ case WMID_GAMING_TURBO_KEY_EVENT:
+ if (return_value.key_num == 0x4)
+ acer_toggle_turbo();
+ break;
default:
pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
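For the PH315-53 quirk added above (cpu_fans = 1, gpu_fans = 1), the bit-packing in WMID_gaming_set_fan_mode() can be checked in isolation: each fan gets an enable bit in the low word and a 2-bit mode field in the high word. The following standalone sketch (helper name hypothetical, not part of the patch) reproduces the same loops and asserts the resulting words, 0x55000f for auto and 0xaa000f for turbo:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fan_word(unsigned int cpu_fans, unsigned int gpu_fans,
			 uint64_t fan_mode)
{
	uint64_t cfg1 = 0, cfg2 = 0;
	unsigned int i;

	if (cpu_fans > 0)
		cfg2 |= 1;				/* CPU fan enable bit */
	for (i = 0; i < cpu_fans + gpu_fans; ++i)
		cfg2 |= 1u << (i + 1);			/* per-fan enable bits */
	for (i = 0; i < gpu_fans; ++i)
		cfg2 |= 1u << (i + 3);			/* extra GPU enable bits */
	if (cpu_fans > 0)
		cfg1 |= fan_mode;			/* 2-bit mode, CPU fan */
	for (i = 0; i < cpu_fans + gpu_fans; ++i)
		cfg1 |= fan_mode << (2 * i + 2);	/* 2-bit mode per fan */
	for (i = 0; i < gpu_fans; ++i)
		cfg1 |= fan_mode << (2 * i + 6);	/* 2-bit mode, GPU fans */

	return cfg2 | cfg1 << 16;			/* driver's packing */
}

int main(void)
{
	assert(fan_word(1, 1, 1) == 0x55000fULL);	/* auto */
	assert(fan_word(1, 1, 2) == 0xaa000fULL);	/* turbo */
	printf("turbo word: 0x%llx\n", (unsigned long long)fan_word(1, 1, 2));
	return 0;
}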
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ebaeb7bb80f5..e14fb5fa7324 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -26,6 +26,7 @@
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/platform_profile.h>
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -210,12 +211,24 @@ struct asus_wmi {
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool egpu_enable_available; // 0 = enable
+ bool egpu_enable;
+
+ bool dgpu_disable_available;
+ bool dgpu_disable;
+
bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
+ struct platform_profile_handler platform_profile_handler;
+ bool platform_profile_support;
+
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
+ bool panel_overdrive_available;
+ bool panel_overdrive;
+
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
@@ -424,6 +437,181 @@ static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
}
}
+/* dGPU ********************************************************************/
+static int dgpu_disable_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->dgpu_disable_available = false;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_DGPU, &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
+ asus->dgpu_disable_available = true;
+ asus->dgpu_disable = result & ASUS_WMI_DSTS_STATUS_BIT;
+ }
+
+ return 0;
+}
+
+static int dgpu_disable_write(struct asus_wmi *asus)
+{
+ u32 retval;
+ u8 value;
+ int err;
+
+ /* Don't rely on type conversion */
+ value = asus->dgpu_disable ? 1 : 0;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, value, &retval);
+ if (err) {
+ pr_warn("Failed to set dgpu disable: %d\n", err);
+ return err;
+ }
+
+ if (retval > 1) {
+ pr_warn("Failed to set dgpu disable (retval): 0x%x\n", retval);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
+
+ return 0;
+}
+
+static ssize_t dgpu_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->dgpu_disable;
+
+ return sysfs_emit(buf, "%d\n", mode);
+}
+
+/*
+ * A user may be required to store the value twice: a typical first store,
+ * then a PCI bus rescan to activate power, then a second store to save
+ * correctly. The reason is that an extra code path in the ACPI firmware
+ * is enabled only when the device and bus are powered.
+ */
+static ssize_t dgpu_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool disable;
+ int result;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &disable);
+ if (result)
+ return result;
+
+ asus->dgpu_disable = disable;
+
+ result = dgpu_disable_write(asus);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(dgpu_disable);
+
+/* eGPU ********************************************************************/
+static int egpu_enable_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->egpu_enable_available = false;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_EGPU, &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
+ asus->egpu_enable_available = true;
+ asus->egpu_enable = result & ASUS_WMI_DSTS_STATUS_BIT;
+ }
+
+ return 0;
+}
+
+static int egpu_enable_write(struct asus_wmi *asus)
+{
+ u32 retval;
+ u8 value;
+ int err;
+
+ /* Don't rely on type conversion */
+ value = asus->egpu_enable ? 1 : 0;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, value, &retval);
+
+ if (err) {
+ pr_warn("Failed to set egpu disable: %d\n", err);
+ return err;
+ }
+
+ if (retval > 1) {
+ pr_warn("Failed to set egpu disable (retval): 0x%x\n", retval);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
+
+ return 0;
+}
+
+static ssize_t egpu_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ bool mode = asus->egpu_enable;
+
+ return sysfs_emit(buf, "%d\n", mode);
+}
+
+/* The ACPI call to enable the eGPU also disables the internal dGPU */
+static ssize_t egpu_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool enable;
+ int result;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &enable);
+ if (result)
+ return result;
+
+ asus->egpu_enable = enable;
+
+ result = egpu_enable_write(asus);
+ if (result)
+ return result;
+
+ /* Ensure that the kernel status of dgpu is updated */
+ result = dgpu_disable_check_present(asus);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(egpu_enable);
+
/* Battery ********************************************************************/
/* The battery maximum charging percentage */
@@ -1221,6 +1409,87 @@ exit:
return result;
}
+/* Panel Overdrive ************************************************************/
+static int panel_od_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->panel_overdrive_available = false;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_PANEL_OD, &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
+ asus->panel_overdrive_available = true;
+ asus->panel_overdrive = result & ASUS_WMI_DSTS_STATUS_BIT;
+ }
+
+ return 0;
+}
+
+static int panel_od_write(struct asus_wmi *asus)
+{
+ u32 retval;
+ u8 value;
+ int err;
+
+ /* Don't rely on type conversion */
+ value = asus->panel_overdrive ? 1 : 0;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, value, &retval);
+
+ if (err) {
+ pr_warn("Failed to set panel overdrive: %d\n", err);
+ return err;
+ }
+
+ if (retval > 1) {
+ pr_warn("Failed to set panel overdrive (retval): 0x%x\n", retval);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
+
+ return 0;
+}
+
+static ssize_t panel_od_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->panel_overdrive);
+}
+
+static ssize_t panel_od_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool overdrive;
+ int result;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &overdrive);
+ if (result)
+ return result;
+
+ asus->panel_overdrive = overdrive;
+ result = panel_od_write(asus);
+
+ if (result)
+ return result;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_od);
+
/* Quirks *********************************************************************/
static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
@@ -1838,12 +2107,23 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
{
u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+ int err;
if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
asus->throttle_thermal_policy_mode = new_mode;
- return throttle_thermal_policy_write(asus);
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+
+ /*
+ * Notify platform_profile of the change so that platform_profile and
+ * throttle_thermal_policy_mode stay in sync.
+ */
+ platform_profile_notify();
+
+ return 0;
}
static ssize_t throttle_thermal_policy_show(struct device *dev,
@@ -1859,9 +2139,10 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int result;
- u8 new_mode;
struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 new_mode;
+ int result;
+ int err;
result = kstrtou8(buf, 10, &new_mode);
if (result < 0)
@@ -1871,7 +2152,15 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
return -EINVAL;
asus->throttle_thermal_policy_mode = new_mode;
- throttle_thermal_policy_write(asus);
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+
+ /*
+ * Notify platform_profile of the change so that platform_profile and
+ * throttle_thermal_policy_mode stay in sync.
+ */
+ platform_profile_notify();
return count;
}
@@ -1879,6 +2168,91 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(throttle_thermal_policy);
+/* Platform profile ***********************************************************/
+static int platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct asus_wmi *asus;
+ int tp;
+
+ asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+
+ tp = asus->throttle_thermal_policy_mode;
+
+ switch (tp) {
+ case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct asus_wmi *asus;
+ int tp;
+
+ asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = ASUS_THROTTLE_THERMAL_POLICY_SILENT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ asus->throttle_thermal_policy_mode = tp;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int platform_profile_setup(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int err;
+
+ /*
+ * It is not an error if a component platform_profile relies on is
+ * unavailable, so return early and skip the platform_profile setup.
+ */
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
+
+ asus->platform_profile_handler.profile_get = platform_profile_get;
+ asus->platform_profile_handler.profile_set = platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED,
+ asus->platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE,
+ asus->platform_profile_handler.choices);
+
+ err = platform_profile_register(&asus->platform_profile_handler);
+ if (err)
+ return err;
+
+ asus->platform_profile_support = true;
+ return 0;
+}
+
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
@@ -2328,10 +2702,13 @@ static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
+ &dev_attr_egpu_enable.attr,
+ &dev_attr_dgpu_disable.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
&dev_attr_throttle_thermal_policy.attr,
+ &dev_attr_panel_od.attr,
NULL
};
@@ -2353,10 +2730,16 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_LID_RESUME;
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
+ else if (attr == &dev_attr_egpu_enable.attr)
+ ok = asus->egpu_enable_available;
+ else if (attr == &dev_attr_dgpu_disable.attr)
+ ok = asus->dgpu_disable_available;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
ok = asus->throttle_thermal_policy_available;
+ else if (attr == &dev_attr_panel_od.attr)
+ ok = asus->panel_overdrive_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2612,6 +2995,14 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
+ err = egpu_enable_check_present(asus);
+ if (err)
+ goto fail_egpu_enable;
+
+ err = dgpu_disable_check_present(asus);
+ if (err)
+ goto fail_dgpu_disable;
+
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
@@ -2622,6 +3013,14 @@ static int asus_wmi_add(struct platform_device *pdev)
else
throttle_thermal_policy_set_default(asus);
+ err = platform_profile_setup(asus);
+ if (err)
+ goto fail_platform_profile_setup;
+
+ err = panel_od_check_present(asus);
+ if (err)
+ goto fail_panel_od;
+
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -2707,8 +3106,14 @@ fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
fail_throttle_thermal_policy:
+fail_platform_profile_setup:
+ if (asus->platform_profile_support)
+ platform_profile_remove();
fail_fan_boost_mode:
+fail_egpu_enable:
+fail_dgpu_disable:
fail_platform:
+fail_panel_od:
kfree(asus);
return err;
}
@@ -2728,6 +3133,9 @@ static int asus_wmi_remove(struct platform_device *device)
asus_fan_set_auto(asus);
asus_wmi_battery_exit(asus);
+ if (asus->platform_profile_support)
+ platform_profile_remove();
+
kfree(asus);
return 0;
}
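Taken together, the new attributes are plain boolean sysfs files. A userspace sketch of the double-store sequence the dgpu_disable comment describes follows; the device path is an assumption that varies per machine, and the sketch is not part of the patch:

#include <stdio.h>
#include <unistd.h>

/* Illustrative paths; the platform device name differs per system. */
#define DGPU_ATTR "/sys/devices/platform/asus-nb-wmi/dgpu_disable"
#define PCI_RESCAN "/sys/bus/pci/rescan"

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	sysfs_write(DGPU_ATTR, "0");	/* first store powers the dGPU path */
	sysfs_write(PCI_RESCAN, "1");	/* rescan so the device comes back */
	sleep(1);			/* arbitrary pause to let the bus settle */
	sysfs_write(DGPU_ATTR, "0");	/* second store now saves correctly */
	return 0;
}

Because the handler registered in platform_profile_setup() is the standard ACPI one, the same three modes should also be reachable through /sys/firmware/acpi/platform_profile with the values quiet, balanced and performance.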
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 9e7314d90bea..821aba31821c 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -140,7 +140,7 @@ config DELL_SMBIOS_SMM
config DELL_SMO8800
tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
default m
- depends on ACPI
+ depends on ACPI || COMPILE_TEST
help
Say Y here if you want to support SMO88XX freefall devices
on Dell Latitude laptops.
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 28447c180be8..5e63d6225048 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -278,9 +278,9 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* SMI requires CPU 0 */
- get_online_cpus();
+ cpus_read_lock();
ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
- put_online_cpus();
+ cpus_read_unlock();
return ret;
}
diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c
index 97c52a839a3e..320c032418ac 100644
--- a/drivers/platform/x86/dell/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell/dell-smbios-smm.c
@@ -24,37 +24,6 @@ static struct calling_interface_buffer *buffer;
static struct platform_device *platform_device;
static DEFINE_MUTEX(smm_mutex);
-static const struct dmi_system_id dell_device_table[] __initconst = {
- {
- .ident = "Dell laptop",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
- },
- },
- {
- .ident = "Dell Computer Corporation",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
- },
- },
- { }
-};
-MODULE_DEVICE_TABLE(dmi, dell_device_table);
-
static void parse_da_table(const struct dmi_header *dm)
{
struct calling_interface_structure *table =
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index 33f823772733..931cc50136de 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -69,9 +69,10 @@ static int run_smbios_call(struct wmi_device *wdev)
if (obj->type == ACPI_TYPE_INTEGER)
dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
obj->integer.value);
+ kfree(output.pointer);
return -EIO;
}
- memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
+ memcpy(input.pointer, obj->buffer.pointer, obj->buffer.length);
dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
priv->buf->std.output[0], priv->buf->std.output[1],
priv->buf->std.output[2], priv->buf->std.output[3]);
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 5d9304a7de1b..3385e852104c 100644
--- a/drivers/platform/x86/dell/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -10,13 +10,14 @@
#define DRIVER_NAME "smo8800"
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
-#include <linux/fs.h>
struct smo8800_device {
u32 irq; /* acpi device irq */
@@ -44,37 +45,6 @@ static irqreturn_t smo8800_interrupt_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static acpi_status smo8800_get_resource(struct acpi_resource *resource,
- void *context)
-{
- struct acpi_resource_extended_irq *irq;
-
- if (resource->type != ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
- return AE_OK;
-
- irq = &resource->data.extended_irq;
- if (!irq || !irq->interrupt_count)
- return AE_OK;
-
- *((u32 *)context) = irq->interrupts[0];
- return AE_CTRL_TERMINATE;
-}
-
-static u32 smo8800_get_irq(struct acpi_device *device)
-{
- u32 irq = 0;
- acpi_status status;
-
- status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- smo8800_get_resource, &irq);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "acpi_walk_resources failed\n");
- return 0;
- }
-
- return irq;
-}
-
static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
@@ -136,7 +106,7 @@ static const struct file_operations smo8800_misc_fops = {
.release = smo8800_misc_release,
};
-static int smo8800_add(struct acpi_device *device)
+static int smo8800_probe(struct platform_device *device)
{
int err;
struct smo8800_device *smo8800;
@@ -160,14 +130,12 @@ static int smo8800_add(struct acpi_device *device)
return err;
}
- device->driver_data = smo8800;
+ platform_set_drvdata(device, smo8800);
- smo8800->irq = smo8800_get_irq(device);
- if (!smo8800->irq) {
- dev_err(&device->dev, "failed to obtain IRQ\n");
- err = -EINVAL;
+ err = platform_get_irq(device, 0);
+ if (err < 0)
goto error;
- }
+ smo8800->irq = err;
err = request_threaded_irq(smo8800->irq, smo8800_interrupt_quick,
smo8800_interrupt_thread,
@@ -189,9 +157,9 @@ error:
return err;
}
-static int smo8800_remove(struct acpi_device *device)
+static int smo8800_remove(struct platform_device *device)
{
- struct smo8800_device *smo8800 = device->driver_data;
+ struct smo8800_device *smo8800 = platform_get_drvdata(device);
free_irq(smo8800->irq, smo8800);
misc_deregister(&smo8800->miscdev);
@@ -211,21 +179,17 @@ static const struct acpi_device_id smo8800_ids[] = {
{ "SMO8831", 0 },
{ "", 0 },
};
-
MODULE_DEVICE_TABLE(acpi, smo8800_ids);
-static struct acpi_driver smo8800_driver = {
- .name = DRIVER_NAME,
- .class = "Latitude",
- .ids = smo8800_ids,
- .ops = {
- .add = smo8800_add,
- .remove = smo8800_remove,
+static struct platform_driver smo8800_driver = {
+ .probe = smo8800_probe,
+ .remove = smo8800_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = smo8800_ids,
},
- .owner = THIS_MODULE,
};
-
-module_acpi_driver(smo8800_driver);
+module_platform_driver(smo8800_driver);
MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO88XX)");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
index a9eae17cc43d..72e9624331c8 100644
--- a/drivers/platform/x86/dual_accel_detect.h
+++ b/drivers/platform/x86/dual_accel_detect.h
@@ -17,30 +17,6 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
-static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
-{
- struct acpi_resource_i2c_serialbus *sb;
- int *count = data;
-
- if (i2c_acpi_get_i2c_resource(ares, &sb))
- *count = *count + 1;
-
- return 1;
-}
-
-static int dual_accel_i2c_client_count(struct acpi_device *adev)
-{
- int ret, count = 0;
- LIST_HEAD(r);
-
- ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
- if (ret < 0)
- return ret;
-
- acpi_dev_free_resource_list(&r);
- return count;
-}
-
static bool dual_accel_detect_bosc0200(void)
{
struct acpi_device *adev;
@@ -50,7 +26,7 @@ static bool dual_accel_detect_bosc0200(void)
if (!adev)
return false;
- count = dual_accel_i2c_client_count(adev);
+ count = i2c_acpi_client_count(adev);
acpi_dev_put(adev);
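This is one of two call sites (see i2c-multi-instantiate.c later in the patch) that had open-coded the same resource walk; i2c_acpi_client_count() in the I2C core now provides it. A minimal sketch of a caller, using the two-accelerometer threshold this header checks for:

#include <linux/acpi.h>
#include <linux/i2c.h>

static bool example_has_two_accels(struct acpi_device *adev)
{
	int count = i2c_acpi_client_count(adev);

	/* Negative values are errors from the ACPI resource walk */
	return count == 2;
}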
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 8c0867bda828..cc53f725c041 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -28,9 +28,6 @@
#include <linux/serio.h>
#include "../../misc/lis3lv02d/lis3lv02d.h"
-#define DRIVER_NAME "hp_accel"
-#define ACPI_MDPS_CLASS "accelerometer"
-
/* Delayed LEDs infrastructure ------------------------------------ */
/* Special LED class that can defer work */
@@ -78,23 +75,14 @@ static const struct acpi_device_id lis3lv02d_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
-
/**
- * lis3lv02d_acpi_init - ACPI _INI method: initialize the device.
+ * lis3lv02d_acpi_init - initialize the device for ACPI
* @lis3: pointer to the device struct
*
* Returns 0 on success.
*/
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
- struct acpi_device *dev = lis3->bus_priv;
- if (!lis3->init_required)
- return 0;
-
- if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
- NULL, NULL) != AE_OK)
- return -EINVAL;
-
return 0;
}
@@ -278,30 +266,6 @@ static struct delayed_led_classdev hpled_led = {
.set_brightness = hpled_set,
};
-static acpi_status
-lis3lv02d_get_resource(struct acpi_resource *resource, void *context)
-{
- if (resource->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
- struct acpi_resource_extended_irq *irq;
- u32 *device_irq = context;
-
- irq = &resource->data.extended_irq;
- *device_irq = irq->interrupts[0];
- }
-
- return AE_OK;
-}
-
-static void lis3lv02d_enum_resources(struct acpi_device *device)
-{
- acpi_status status;
-
- status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- lis3lv02d_get_resource, &lis3_dev.irq);
- if (ACPI_FAILURE(status))
- printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
-}
-
static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -331,23 +295,19 @@ static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
return false;
}
-static int lis3lv02d_add(struct acpi_device *device)
+static int lis3lv02d_probe(struct platform_device *device)
{
int ret;
- if (!device)
- return -EINVAL;
-
- lis3_dev.bus_priv = device;
+ lis3_dev.bus_priv = ACPI_COMPANION(&device->dev);
lis3_dev.init = lis3lv02d_acpi_init;
lis3_dev.read = lis3lv02d_acpi_read;
lis3_dev.write = lis3lv02d_acpi_write;
- strcpy(acpi_device_name(device), DRIVER_NAME);
- strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
- device->driver_data = &lis3_dev;
/* obtain IRQ number of our device from ACPI */
- lis3lv02d_enum_resources(device);
+ ret = platform_get_irq_optional(device, 0);
+ if (ret > 0)
+ lis3_dev.irq = ret;
/* If possible use a "standard" axes order */
if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
@@ -359,7 +319,6 @@ static int lis3lv02d_add(struct acpi_device *device)
}
/* call the core layer do its init */
- lis3_dev.init_required = true;
ret = lis3lv02d_init_device(&lis3_dev);
if (ret)
return ret;
@@ -381,11 +340,8 @@ static int lis3lv02d_add(struct acpi_device *device)
return ret;
}
-static int lis3lv02d_remove(struct acpi_device *device)
+static int lis3lv02d_remove(struct platform_device *device)
{
- if (!device)
- return -EINVAL;
-
i8042_remove_filter(hp_accel_i8042_filter);
lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
@@ -396,7 +352,6 @@ static int lis3lv02d_remove(struct acpi_device *device)
return lis3lv02d_remove_fs(&lis3_dev);
}
-
#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_suspend(struct device *dev)
{
@@ -407,14 +362,12 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
- lis3_dev.init_required = false;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
static int lis3lv02d_restore(struct device *dev)
{
- lis3_dev.init_required = true;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
@@ -434,17 +387,16 @@ static const struct dev_pm_ops hp_accel_pm = {
#endif
/* For the HP MDPS aka 3D Driveguard */
-static struct acpi_driver lis3lv02d_driver = {
- .name = DRIVER_NAME,
- .class = ACPI_MDPS_CLASS,
- .ids = lis3lv02d_device_ids,
- .ops = {
- .add = lis3lv02d_add,
- .remove = lis3lv02d_remove,
+static struct platform_driver lis3lv02d_driver = {
+ .probe = lis3lv02d_probe,
+ .remove = lis3lv02d_remove,
+ .driver = {
+ .name = "hp_accel",
+ .pm = HP_ACCEL_PM,
+ .acpi_match_table = lis3lv02d_device_ids,
},
- .drv.pm = HP_ACCEL_PM,
};
-module_acpi_driver(lis3lv02d_driver);
+module_platform_driver(lis3lv02d_driver);
MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 2cce82579d09..a50153ecd560 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -32,31 +32,6 @@ struct i2c_multi_inst_data {
struct i2c_client *clients[];
};
-static int i2c_multi_inst_count(struct acpi_resource *ares, void *data)
-{
- struct acpi_resource_i2c_serialbus *sb;
- int *count = data;
-
- if (i2c_acpi_get_i2c_resource(ares, &sb))
- *count = *count + 1;
-
- return 1;
-}
-
-static int i2c_multi_inst_count_resources(struct acpi_device *adev)
-{
- LIST_HEAD(r);
- int count = 0;
- int ret;
-
- ret = acpi_dev_get_resources(adev, &r, i2c_multi_inst_count, &count);
- if (ret < 0)
- return ret;
-
- acpi_dev_free_resource_list(&r);
- return count;
-}
-
static int i2c_multi_inst_probe(struct platform_device *pdev)
{
struct i2c_multi_inst_data *multi;
@@ -76,7 +51,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
adev = ACPI_COMPANION(dev);
/* Count number of clients to instantiate */
- ret = i2c_multi_inst_count_resources(adev);
+ ret = i2c_acpi_client_count(adev);
if (ret < 0)
return ret;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 784326bd72f0..e7a1299e3776 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -41,6 +41,7 @@
static const char *const ideapad_wmi_fnesc_events[] = {
"26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
"56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+ "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", /* Legion 5 */
};
#endif
@@ -1459,11 +1460,19 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
static void ideapad_wmi_notify(u32 value, void *context)
{
struct ideapad_private *priv = context;
+ unsigned long result;
switch (value) {
case 128:
ideapad_input_report(priv, value);
break;
+ case 208:
+ if (!eval_hals(priv->adev->handle, &result)) {
+ bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
+
+ exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ }
+ break;
default:
dev_info(&priv->platform_device->dev,
"Unknown WMI event: %u\n", value);
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index f2eef337eb98..0b21468e1bd0 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -16,7 +16,156 @@ menuconfig X86_PLATFORM_DRIVERS_INTEL
if X86_PLATFORM_DRIVERS_INTEL
+source "drivers/platform/x86/intel/atomisp2/Kconfig"
+source "drivers/platform/x86/intel/int1092/Kconfig"
source "drivers/platform/x86/intel/int33fe/Kconfig"
source "drivers/platform/x86/intel/int3472/Kconfig"
+source "drivers/platform/x86/intel/pmc/Kconfig"
+source "drivers/platform/x86/intel/pmt/Kconfig"
+source "drivers/platform/x86/intel/speed_select_if/Kconfig"
+source "drivers/platform/x86/intel/telemetry/Kconfig"
+source "drivers/platform/x86/intel/wmi/Kconfig"
+
+config INTEL_HID_EVENT
+ tristate "Intel HID Event"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel HID Event hotkey interface.
+ Some laptops require this driver for hotkey support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_hid.
+
+config INTEL_VBTN
+ tristate "Intel Virtual Button"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vbtn.
+
+config INTEL_INT0002_VGPIO
+ tristate "Intel ACPI INT0002 Virtual GPIO driver"
+ depends on GPIOLIB && ACPI && PM_SLEEP
+ select GPIOLIB_IRQCHIP
+ help
+ Some peripherals on Bay Trail and Cherry Trail platforms signal a
+ Power Management Event (PME) to the Power Management Controller (PMC)
+ to wakeup the system. When this happens software needs to explicitly
+ clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
+ IRQ storm on IRQ 9.
+
+ This is modelled in ACPI through the INT0002 ACPI device, which is
+ called a "Virtual GPIO controller" in ACPI because it defines the
+ event handler to call when the PME triggers through _AEI and _L02
+ methods as would be done for a real GPIO interrupt in ACPI.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_int0002_vgpio.
+
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on ACPI_VIDEO || ACPI_VIDEO=n
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ help
+ The Intel Oaktrail platform needs this driver to provide interfaces to
+ enable/disable the camera, WiFi, BT and other devices. If in doubt, say Y
+ here; it will only load on supported platforms.
+
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel Broxton Whiskey Cove TMU Driver"
+ depends on INTEL_SOC_PMIC_BXTWC
+ depends on MFD_INTEL_PMC_BXT
+ select REGMAP
+ help
+ Select this driver to use the Intel Broxton Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wakeup functionality in the TMU unit of
+ Whiskey Cove PMIC.
+
+config INTEL_CHTDC_TI_PWRBTN
+ tristate "Intel Cherry Trail Dollar Cove TI power button driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ depends on INPUT
+ help
+ This option adds a power button driver for Dollar Cove TI
+ PMIC on Intel Cherry Trail devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_chtdc_ti_pwrbtn.
+
+config INTEL_MRFLD_PWRBTN
+ tristate "Intel Merrifield Basin Cove power button driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ depends on INPUT
+ help
+ This option adds a power button driver for Basin Cove PMIC
+ on Intel Merrifield devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_mrfld_pwrbtn.
+
+config INTEL_PUNIT_IPC
+ tristate "Intel P-Unit IPC Driver"
+ help
+ This driver provides support for the Intel P-Unit Mailbox IPC mechanism,
+ which is used to bridge communications between the kernel and the P-Unit.
+
+config INTEL_RST
+ tristate "Intel Rapid Start Technology Driver"
+ depends on ACPI
+ help
+ This driver provides support for modifying parameters on systems
+ equipped with Intel's Rapid Start Technology. When put in an ACPI
+ sleep state, these devices will wake after either a configured
+ timeout or when the system battery reaches a critical state,
+ automatically copying memory contents to disk. On resume, the
+ firmware will copy the memory contents back to RAM and resume the OS
+ as usual.
+
+config INTEL_SMARTCONNECT
+ tristate "Intel Smart Connect disabling driver"
+ depends on ACPI
+ help
+ Intel Smart Connect is a technology intended to permit devices to
+ update state by resuming for a short period of time at regular
+ intervals. If a user enables this functionality under Windows and
+ then reboots into Linux, the system may remain configured to resume
+ on suspend. In the absence of any userspace to support it, the system
+ will then remain awake until something triggers another suspend.
+
+ This driver checks to determine whether the device has Intel Smart
+ Connect enabled, and if so disables it.
+
+config INTEL_TURBO_MAX_3
+ bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
+ depends on X86_64 && SCHED_MC_PRIO
+ help
+ This driver reads the maximum performance ratio of each CPU and sets up
+ the scheduler priority metrics. In this way the scheduler can prefer the
+ CPU with higher performance to schedule tasks.
+
+ This driver is only required when the system is not using Hardware
+ P-States (HWP). In HWP mode, priority can be read from ACPI tables.
+
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of Uncore frequency limits on
+ supported server platforms.
+
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
endif # X86_PLATFORM_DRIVERS_INTEL
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 0653055942d5..8b3a3f7bab49 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -4,5 +4,44 @@
# Intel x86 Platform-Specific Drivers
#
+obj-$(CONFIG_INTEL_ATOMISP2_PDX86) += atomisp2/
+obj-$(CONFIG_INTEL_SAR_INT1092) += int1092/
obj-$(CONFIG_INTEL_CHT_INT33FE) += int33fe/
obj-$(CONFIG_INTEL_SKL_INT3472) += int3472/
+obj-$(CONFIG_INTEL_PMC_CORE) += pmc/
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt/
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += speed_select_if/
+obj-$(CONFIG_INTEL_TELEMETRY) += telemetry/
+obj-$(CONFIG_INTEL_WMI) += wmi/
+
+# Intel input drivers
+intel-hid-y := hid.o
+obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+intel-vbtn-y := vbtn.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
+
+# Intel miscellaneous drivers
+intel_int0002_vgpio-y := int0002_vgpio.o
+obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
+intel_oaktrail-y := oaktrail.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+
+# Intel PMIC / PMC / P-Unit drivers
+intel_bxtwc_tmu-y := bxtwc_tmu.o
+obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
+intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+intel_mrfld_pwrbtn-y := mrfld_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
+intel_punit_ipc-y := punit_ipc.o
+obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+
+# Intel Uncore drivers
+intel-rst-y := rst.o
+obj-$(CONFIG_INTEL_RST) += intel-rst.o
+intel-smartconnect-y := smartconnect.o
+obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
+intel_turbo_max_3-y := turbo_max_3.o
+obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
+intel-uncore-frequency-y := uncore-frequency.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
diff --git a/drivers/platform/x86/intel/atomisp2/Kconfig b/drivers/platform/x86/intel/atomisp2/Kconfig
new file mode 100644
index 000000000000..35dd2be9d2a1
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_ATOMISP2_PDX86
+ bool
+
+config INTEL_ATOMISP2_LED
+ tristate "Intel AtomISP v2 camera LED driver"
+ depends on GPIOLIB && LEDS_GPIO
+ select INTEL_ATOMISP2_PDX86
+ help
+ Many Bay Trail and Cherry Trail devices come with a camera attached
+ to Intel's Image Signal Processor. Linux currently does not have a
+ driver for these, so they do not work as a camera. Some of these
+ cameras have an LED which is controlled through a GPIO.
+
+ Some of these devices have a firmware issue where the LED gets turned
+ on at boot. This driver turns the LED off at boot and also allows
+ controlling the LED (repurposing it) through the sysfs LED interface.
+
+ Which GPIO is attached to the LED is usually not described in the
+ ACPI tables, so the driver carries per-system GPIO information
+ internally; this means it only works on systems it knows about.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_led.
+
+config INTEL_ATOMISP2_PM
+ tristate "Intel AtomISP v2 dummy / power-management driver"
+ depends on PCI && IOSF_MBI && PM
+ depends on !INTEL_ATOMISP
+ select INTEL_ATOMISP2_PDX86
+ help
+ Power-management driver for Intel's Image Signal Processor found on
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow the
+ system to enter S0ix modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_pm.
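As a generic illustration of the idea (the in-tree driver additionally uses IOSF-MBI accesses, which are omitted here), a dummy PCI driver can park an otherwise unused function in D3hot at probe time:

	/* Generic sketch: claim the device and leave it powered down */
	#include <linux/module.h>
	#include <linux/pci.h>

	static int dummy_isp_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
	{
		int ret = pcim_enable_device(pdev);

		if (ret)
			return ret;

		/* Put the function into D3hot so the SoC can reach S0ix */
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}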
diff --git a/drivers/platform/x86/intel/atomisp2/Makefile b/drivers/platform/x86/intel/atomisp2/Makefile
new file mode 100644
index 000000000000..96b1e877d1f1
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_atomisp2_led-y := led.o
+obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
+intel_atomisp2_pm-y += pm.o
+obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
diff --git a/drivers/platform/x86/intel_atomisp2_led.c b/drivers/platform/x86/intel/atomisp2/led.c
index 5935dfca166f..5935dfca166f 100644
--- a/drivers/platform/x86/intel_atomisp2_led.c
+++ b/drivers/platform/x86/intel/atomisp2/led.c
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel/atomisp2/pm.c
index 805fc0d8515c..805fc0d8515c 100644
--- a/drivers/platform/x86/intel_atomisp2_pm.c
+++ b/drivers/platform/x86/intel/atomisp2/pm.c
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
index 7ccf583649e6..7ccf583649e6 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel/bxtwc_tmu.c
diff --git a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
index 9606a994af22..9606a994af22 100644
--- a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel/hid.c
index 2e4e97a626a5..a33a5826e81a 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
-#include "dual_accel_detect.h"
+#include "../dual_accel_detect.h"
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 569342aa8926..569342aa8926 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
diff --git a/drivers/platform/x86/intel/int1092/Kconfig b/drivers/platform/x86/intel/int1092/Kconfig
new file mode 100644
index 000000000000..2e9a177241aa
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Kconfig
@@ -0,0 +1,14 @@
+config INTEL_SAR_INT1092
+ tristate "Intel Specific Absorption Rate Driver"
+ depends on ACPI
+ help
+ This driver helps limit the exposure of the human body to RF energy by
+ providing information to a userspace application, which instructs the
+ Intel M.2 modem to regulate its RF power based on SAR data obtained from
+ the sensors captured in the BIOS. An ACPI interface exposes this data
+ from the BIOS to the SAR driver. The userspace front-end application
+ interacts with the SAR driver to obtain information such as the device
+ mode, antenna index, baseband index and SAR table index, and uses an
+ available channel such as the MBIM interface to pass it to the modem
+ for RF power regulation. Enable this config when a given platform needs
+ to support the "Dynamic SAR" configuration for a modem on the platform.
diff --git a/drivers/platform/x86/intel/int1092/Makefile b/drivers/platform/x86/intel/int1092/Makefile
new file mode 100644
index 000000000000..4ab94e541de3
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INTEL_SAR_INT1092) += intel_sar.o
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
new file mode 100644
index 000000000000..379560fe5df9
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include "intel_sar.h"
+
+/**
+ * get_int_value: Retrieve integer values from ACPI Object
+ * @obj: acpi_object pointer which has the integer value
+ * @out: output pointer will get integer value
+ *
+ * Function is used to retrieve integer value from acpi object.
+ *
+ * Return:
+ * * 0 on success
+ * * -EIO if there is an issue in acpi_object passed.
+ */
+static int get_int_value(union acpi_object *obj, int *out)
+{
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -EIO;
+ *out = (int)obj->integer.value;
+ return 0;
+}
+
+/**
+ * update_sar_data: sar data is updated based on regulatory mode
+ * @context: pointer to driver context structure
+ *
+ * sar_data is updated based on regulatory value
+ * context->reg_value will never exceed MAX_REGULATORY
+ */
+static void update_sar_data(struct wwan_sar_context *context)
+{
+ struct wwan_device_mode_configuration *config =
+ &context->config_data[context->reg_value];
+
+ if (config->device_mode_info &&
+ context->sar_data.device_mode < config->total_dev_mode) {
+ struct wwan_device_mode_info *dev_mode =
+ &config->device_mode_info[context->sar_data.device_mode];
+
+ context->sar_data.antennatable_index = dev_mode->antennatable_index;
+ context->sar_data.bandtable_index = dev_mode->bandtable_index;
+ context->sar_data.sartable_index = dev_mode->sartable_index;
+ }
+}
+
+/**
+ * parse_package: parse acpi package for retrieving SAR information
+ * @context: pointer to driver context structure
+ * @item : acpi_object pointer
+ *
+ * The given acpi_object is iterated over to retrieve information for each
+ * device mode. If the package corresponding to a specific device mode is
+ * faulty, it is skipped and the corresponding entry in the context structure
+ * keeps its default value of zero. On a parse error the loop simply
+ * continues with the next device mode.
+ *
+ * Return:
+ * AE_OK if success
+ * AE_ERROR on error
+ */
+static acpi_status parse_package(struct wwan_sar_context *context, union acpi_object *item)
+{
+ struct wwan_device_mode_configuration *data;
+ int value, itr, reg;
+ union acpi_object *num;
+
+ num = &item->package.elements[0];
+ if (get_int_value(num, &value) || value < 0 || value >= MAX_REGULATORY)
+ return AE_ERROR;
+
+ reg = value;
+
+ data = &context->config_data[reg];
+ if (data->total_dev_mode > MAX_DEV_MODES || data->total_dev_mode == 0 ||
+ item->package.count <= data->total_dev_mode)
+ return AE_ERROR;
+
+ data->device_mode_info = kmalloc_array(data->total_dev_mode,
+ sizeof(struct wwan_device_mode_info), GFP_KERNEL);
+ if (!data->device_mode_info)
+ return AE_ERROR;
+
+ for (itr = 0; itr < data->total_dev_mode; itr++) {
+ struct wwan_device_mode_info temp = { 0 };
+
+ num = &item->package.elements[itr + 1];
+ if (num->type != ACPI_TYPE_PACKAGE || num->package.count < TOTAL_DATA)
+ continue;
+ if (get_int_value(&num->package.elements[0], &temp.device_mode))
+ continue;
+ if (get_int_value(&num->package.elements[1], &temp.bandtable_index))
+ continue;
+ if (get_int_value(&num->package.elements[2], &temp.antennatable_index))
+ continue;
+ if (get_int_value(&num->package.elements[3], &temp.sartable_index))
+ continue;
+ data->device_mode_info[itr] = temp;
+ }
+ return AE_OK;
+}
+
+/**
+ * sar_get_device_mode: Extraction of information from BIOS via DSM calls
+ * @device: platform device for which to retrieve the data
+ *
+ * Retrieve the current device mode information from the BIOS.
+ *
+ * Return:
+ * AE_OK on success
+ * AE_ERROR on error
+ */
+static acpi_status sar_get_device_mode(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ acpi_status status = AE_OK;
+ union acpi_object *out;
+ u32 rev = 0;
+ int value;
+
+ out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
+ COMMAND_ID_DEV_MODE, NULL);
+ if (get_int_value(out, &value)) {
+ dev_err(&device->dev, "DSM cmd:%d Failed to retrieve value\n", COMMAND_ID_DEV_MODE);
+ status = AE_ERROR;
+ goto dev_mode_error;
+ }
+ context->sar_data.device_mode = value;
+ update_sar_data(context);
+ sysfs_notify(&device->dev.kobj, NULL, SYSFS_DATANAME);
+
+dev_mode_error:
+ ACPI_FREE(out);
+ return status;
+}
+
+static const struct acpi_device_id sar_device_ids[] = {
+ { "INTC1092", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, sar_device_ids);
+
+static ssize_t intc_data_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d %d %d %d\n", context->sar_data.device_mode,
+ context->sar_data.bandtable_index,
+ context->sar_data.antennatable_index,
+ context->sar_data.sartable_index);
+}
+static DEVICE_ATTR_RO(intc_data);
+
+static ssize_t intc_reg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", context->reg_value);
+}
+
+static ssize_t intc_reg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+ unsigned int value;
+ int read;
+
+ if (!count)
+ return -EINVAL;
+ read = kstrtouint(buf, 10, &value);
+ if (read < 0)
+ return read;
+ if (value >= MAX_REGULATORY)
+ return -EOVERFLOW;
+ context->reg_value = value;
+ update_sar_data(context);
+ sysfs_notify(&dev->kobj, NULL, SYSFS_DATANAME);
+ return count;
+}
+static DEVICE_ATTR_RW(intc_reg);
+
+static struct attribute *intcsar_attrs[] = {
+ &dev_attr_intc_data.attr,
+ &dev_attr_intc_reg.attr,
+ NULL
+};
+
+static struct attribute_group intcsar_group = {
+ .attrs = intcsar_attrs,
+};
+
+static void sar_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct platform_device *device = data;
+
+ if (event == SAR_EVENT) {
+ if (sar_get_device_mode(device) != AE_OK)
+ dev_err(&device->dev, "sar_get_device_mode error");
+ }
+}
+
+static void sar_get_data(int reg, struct wwan_sar_context *context)
+{
+ union acpi_object *out, req;
+ u32 rev = 0;
+
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = reg;
+ out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
+ COMMAND_ID_CONFIG_TABLE, &req);
+ if (!out)
+ return;
+ if (out->type == ACPI_TYPE_PACKAGE && out->package.count >= 3 &&
+ out->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[2].type == ACPI_TYPE_PACKAGE &&
+ out->package.elements[2].package.count > 0) {
+ context->config_data[reg].version = out->package.elements[0].integer.value;
+ context->config_data[reg].total_dev_mode =
+ out->package.elements[1].integer.value;
+ if (context->config_data[reg].total_dev_mode <= 0 ||
+ context->config_data[reg].total_dev_mode > MAX_DEV_MODES) {
+ ACPI_FREE(out);
+ return;
+ }
+ parse_package(context, &out->package.elements[2]);
+ }
+ ACPI_FREE(out);
+}
+
+static int sar_probe(struct platform_device *device)
+{
+ struct wwan_sar_context *context;
+ int reg;
+ int result;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->sar_device = device;
+ context->handle = ACPI_HANDLE(&device->dev);
+ dev_set_drvdata(&device->dev, context);
+
+ result = guid_parse(SAR_DSM_UUID, &context->guid);
+ if (result) {
+ dev_err(&device->dev, "SAR UUID parse error: %d\n", result);
+ goto r_free;
+ }
+
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ sar_get_data(reg, context);
+
+ if (sar_get_device_mode(device) != AE_OK) {
+ dev_err(&device->dev, "Failed to get device mode\n");
+ result = -EIO;
+ goto r_free;
+ }
+
+ result = sysfs_create_group(&device->dev.kobj, &intcsar_group);
+ if (result) {
+ dev_err(&device->dev, "sysfs creation failed\n");
+ goto r_free;
+ }
+
+ if (acpi_install_notify_handler(ACPI_HANDLE(&device->dev), ACPI_DEVICE_NOTIFY,
+ sar_notify, (void *)device) != AE_OK) {
+ dev_err(&device->dev, "Failed acpi_install_notify_handler\n");
+ result = -EIO;
+ goto r_sys;
+ }
+ return 0;
+
+r_sys:
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+r_free:
+ kfree(context);
+ return result;
+}
+
+static int sar_remove(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ int reg;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(&device->dev),
+ ACPI_DEVICE_NOTIFY, sar_notify);
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ kfree(context->config_data[reg].device_mode_info);
+
+ kfree(context);
+ return 0;
+}
+
+static struct platform_driver sar_driver = {
+ .probe = sar_probe,
+ .remove = sar_remove,
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(sar_device_ids)
+ }
+};
+module_platform_driver(sar_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
+MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.h b/drivers/platform/x86/intel/int1092/intel_sar.h
new file mode 100644
index 000000000000..b5310510b84c
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef INTEL_SAR_H
+#define INTEL_SAR_H
+
+#define COMMAND_ID_DEV_MODE 1
+#define COMMAND_ID_CONFIG_TABLE 2
+#define DRVNAME "intc_sar"
+#define MAX_DEV_MODES 50
+#define MAX_REGULATORY 3
+#define SAR_DSM_UUID "82737E72-3A33-4C45-A9C7-57C0411A5F13"
+#define SAR_EVENT 0x80
+#define SYSFS_DATANAME "intc_data"
+#define TOTAL_DATA 4
+
+/**
+ * Structure wwan_device_mode_info - device mode information
+ * Holds the data that needs to be passed to userspace.
+ * The data is updated from the BIOS sensor information.
+ * @device_mode: Specific mode of the device
+ * @bandtable_index: Index of RF band
+ * @antennatable_index: Index of antenna
+ * @sartable_index: Index of SAR
+ */
+struct wwan_device_mode_info {
+ int device_mode;
+ int bandtable_index;
+ int antennatable_index;
+ int sartable_index;
+};
+
+/**
+ * Structure wwan_device_mode_configuration - device configuration
+ * Holds the data that is configured and obtained on probe event.
+ * The data is updated from the BIOS sensor information.
+ * @version: Mode configuration version
+ * @total_dev_mode: Total number of device modes
+ * @device_mode_info: pointer to structure wwan_device_mode_info
+ */
+struct wwan_device_mode_configuration {
+ int version;
+ int total_dev_mode;
+ struct wwan_device_mode_info *device_mode_info;
+};
+
+/**
+ * Structure wwan_supported_info - userspace datastore
+ * Holds the data that is obtained from userspace.
+ * The data is updated from userspace and values are sent back in the
+ * structure format described here.
+ * @reg_mode_needed: regulatory mode set by user for tests
+ * @bios_table_revision: Version of SAR table
+ * @num_supported_modes: Total supported modes based on reg_mode
+ */
+struct wwan_supported_info {
+ int reg_mode_needed;
+ int bios_table_revision;
+ int num_supported_modes;
+};
+
+/**
+ * Structure wwan_sar_context - context of SAR
+ * Holds the complete context for the lifetime of the driver.
+ * The context holds instances of the data used for the different cases.
+ * @guid: UUID used for the ACPI DSM calls
+ * @handle: ACPI handle of the device
+ * @reg_value: regulatory value
+ * Regulatory 0: FCC, 1: CE, 2: ISED
+ * @sar_device: platform_device type
+ * @sar_kobject: kobject for sysfs
+ * @supported_data: wwan_supported_info struct
+ * @sar_data: wwan_device_mode_info struct
+ * @config_data: wwan_device_mode_configuration array struct
+ */
+struct wwan_sar_context {
+ guid_t guid;
+ acpi_handle handle;
+ int reg_value;
+ struct platform_device *sar_device;
+ struct wwan_supported_info supported_data;
+ struct wwan_device_mode_info sar_data;
+ struct wwan_device_mode_configuration config_data[MAX_REGULATORY];
+};
+#endif /* INTEL_SAR_H */
diff --git a/drivers/platform/x86/intel/int33fe/Makefile b/drivers/platform/x86/intel/int33fe/Makefile
index cc11183ce179..9456e3b37f6f 100644
--- a/drivers/platform/x86/intel/int33fe/Makefile
+++ b/drivers/platform/x86/intel/int33fe/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
-intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
+intel_cht_int33fe-y := intel_cht_int33fe_common.o \
intel_cht_int33fe_typec.o \
intel_cht_int33fe_microb.o
diff --git a/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c
index 251ed9bac789..463222521e61 100644
--- a/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c
+++ b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c
@@ -16,33 +16,6 @@
#define EXPECTED_PTYPE 4
-static int cht_int33fe_i2c_res_filter(struct acpi_resource *ares, void *data)
-{
- struct acpi_resource_i2c_serialbus *sb;
- int *count = data;
-
- if (i2c_acpi_get_i2c_resource(ares, &sb))
- (*count)++;
-
- return 1;
-}
-
-static int cht_int33fe_count_i2c_clients(struct device *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- LIST_HEAD(resource_list);
- int count = 0;
- int ret;
-
- ret = acpi_dev_get_resources(adev, &resource_list,
- cht_int33fe_i2c_res_filter, &count);
- acpi_dev_free_resource_list(&resource_list);
- if (ret < 0)
- return ret;
-
- return count;
-}
-
static int cht_int33fe_check_hw_type(struct device *dev)
{
unsigned long long ptyp;
@@ -69,7 +42,7 @@ static int cht_int33fe_check_hw_type(struct device *dev)
return -ENODEV;
}
- ret = cht_int33fe_count_i2c_clients(dev);
+ ret = i2c_acpi_client_count(ACPI_COMPANION(dev));
if (ret < 0)
return ret;
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index 48bd97f0a04e..2362e04db18d 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472.o
-intel_skl_int3472-objs := intel_skl_int3472_common.o \
+intel_skl_int3472-y := intel_skl_int3472_common.o \
intel_skl_int3472_discrete.o \
intel_skl_int3472_tps68470.o \
intel_skl_int3472_clk_and_regulator.o
diff --git a/drivers/platform/x86/intel_mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c
index d58fea51747e..d58fea51747e 100644
--- a/drivers/platform/x86/intel_mrfld_pwrbtn.c
+++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index 1a09a75bd16d..1a09a75bd16d 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
new file mode 100644
index 000000000000..b526597e4deb
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+config INTEL_PMC_CORE
+ tristate "Intel PMC Core driver"
+ depends on PCI
+ depends on ACPI
+ help
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via various interfaces. This
+ driver can utilize debugging capabilities and supported features as
+ exposed by the Power Management Controller. It may also perform some
+ tasks in the PMC in order to enable transition into the SLPS0 state.
+ It should be selected on all Intel platforms supported by the driver.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter
+ - PCH IP Power Gating status
+ - LTR Ignore / LTR Show
+ - MPHY/PLL gating status (Sunrisepoint PCH only)
+ - SLPS0 Debug registers (Cannonlake/Icelake PCH)
+ - Low Power Mode registers (Tigerlake and beyond)
+ - PMC quirks as needed to enable SLPS0/S0ix
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
new file mode 100644
index 000000000000..8966fcdc0e1d
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+intel_pmc_core-y := core.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
+intel_pmc_core_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel/pmc/core.c
index b0e486a6bdfb..ac19fcc9abbf 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -31,7 +31,7 @@
#include <asm/msr.h>
#include <asm/tsc.h>
-#include "intel_pmc_core.h"
+#include "core.h"
#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
#define ACPI_GET_LOW_MODE_REGISTERS 1
@@ -645,6 +645,306 @@ free_acpi_obj:
ACPI_FREE(out_obj);
}
+/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
+static const struct pmc_bit_map adl_pfear_map[] = {
+ {"SPI/eSPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"HDA_PGD0", BIT(1)},
+ {"HDA_PGD1", BIT(2)},
+ {"HDA_PGD2", BIT(3)},
+ {"HDA_PGD3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"ITH", BIT(3)},
+
+ {"XDCI", BIT(1)},
+ {"DCI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"CNVI", BIT(3)},
+
+ {"HDA_PGD4", BIT(2)},
+ {"HDA_PGD5", BIT(3)},
+ {"HDA_PGD6", BIT(4)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_adl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ adl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map adl_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_bit_map adl_clocksource_status_map[] = {
+ {"CLKPART1_OFF_STS", BIT(0)},
+ {"CLKPART2_OFF_STS", BIT(1)},
+ {"CLKPART3_OFF_STS", BIT(2)},
+ {"CLKPART4_OFF_STS", BIT(3)},
+ {"CLKPART5_OFF_STS", BIT(4)},
+ {"CLKPART6_OFF_STS", BIT(5)},
+ {"CLKPART7_OFF_STS", BIT(6)},
+ {"CLKPART8_OFF_STS", BIT(7)},
+ {"PCIE0PLL_OFF_STS", BIT(10)},
+ {"PCIE1PLL_OFF_STS", BIT(11)},
+ {"PCIE2PLL_OFF_STS", BIT(12)},
+ {"PCIE3PLL_OFF_STS", BIT(13)},
+ {"PCIE4PLL_OFF_STS", BIT(14)},
+ {"PCIE5PLL_OFF_STS", BIT(15)},
+ {"PCIE6PLL_OFF_STS", BIT(16)},
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"AUDIOPLL_OFF_STS", BIT(23)},
+ {"GBEPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"USB3PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"DSP_PGD0_PG_STS", BIT(9)},
+ {"DSP_PGD1_PG_STS", BIT(10)},
+ {"DSP_PGD2_PG_STS", BIT(11)},
+ {"DSP_PGD3_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"PECI_PGD0_PG_STS", BIT(21)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"DSP_PGD4_PG_STS", BIT(26)},
+ {"SPG_PGD0_PG_STS", BIT(27)},
+ {"SPE_PGD0_PG_STS", BIT(28)},
+ {}
+};
+
+static const struct pmc_bit_map adl_power_gating_status_2_map[] = {
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"SPF_PGD0_PG_STS", BIT(14)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_0_map[] = {
+ {"ISH_D3_STS", BIT(2)},
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"SPE_D3_STS", BIT(16)},
+ {"DSP_D3_STS", BIT(19)},
+ {"SATA_D3_STS", BIT(20)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_1_map[] = {
+ {"GBE_D3_STS", BIT(19)},
+ {"CNVI_D3_STS", BIT(27)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {}
+};
+
+static const struct pmc_bit_map adl_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
+ {"ISH_VNN_REQ_STS", BIT(2)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {"DSP_VNN_REQ_STS", BIT(19)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"CNVI_VNN_REQ_STS", BIT(27)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+static const struct pmc_bit_map adl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS", BIT(3)},
+ {"ITH_REQ_STS", BIT(5)},
+ {"CNVI_REQ_STS", BIT(6)},
+ {"ISH_REQ_STS", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS", BIT(10)},
+ {"PCIe_Clk_REQ_STS", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS", BIT(16)},
+ {"Break-even_En_REQ_STS", BIT(17)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"xDCI_attached_REQ_STS", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map *adl_lpm_maps[] = {
+ adl_clocksource_status_map,
+ adl_power_gating_status_0_map,
+ adl_power_gating_status_1_map,
+ adl_power_gating_status_2_map,
+ adl_d3_status_0_map,
+ adl_d3_status_1_map,
+ adl_d3_status_2_map,
+ adl_d3_status_3_map,
+ adl_vnn_req_status_0_map,
+ adl_vnn_req_status_1_map,
+ adl_vnn_req_status_2_map,
+ adl_vnn_req_status_3_map,
+ adl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map adl_reg_map = {
+ .pfear_sts = ext_adl_pfear_map,
+ .slp_s0_offset = ADL_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = adl_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_modes = ADL_LPM_NUM_MODES,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = ADL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = ADL_LPM_PRI_OFFSET,
+ .lpm_en_offset = ADL_LPM_EN_OFFSET,
+ .lpm_residency_offset = ADL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = adl_lpm_maps,
+ .lpm_status_offset = ADL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET,
+};
+
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
@@ -1449,9 +1749,42 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
-static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
+static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
- u8 lpm_priority[LPM_MAX_NUM_MODES];
+ int i, j;
+
+ if (!lpm_pri)
+ return false;
+ /*
+ * Each byte contains the priority level for 2 modes (7:4 and 3:0).
+ * In a 32 bit register this allows for describing 8 modes. Store the
+ * levels and look for values out of range.
+ */
+ for (i = 0; i < 8; i++) {
+ int level = lpm_pri & GENMASK(3, 0);
+
+ if (level >= LPM_MAX_NUM_MODES)
+ return false;
+
+ mode_order[i] = level;
+ lpm_pri >>= 4;
+ }
+
+ /* Check that we have unique values */
+ for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
+ for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
+ if (mode_order[i] == mode_order[j])
+ return false;
+
+ return true;
+}
+
+static void pmc_core_get_low_power_modes(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
+ u8 mode_order[LPM_MAX_NUM_MODES];
+ u32 lpm_pri;
u32 lpm_en;
int mode, i, p;
@@ -1462,24 +1795,28 @@ static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
pmcdev->num_lpm_modes = hweight32(lpm_en);
- /* Each byte contains information for 2 modes (7:4 and 3:0) */
- for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
- u8 priority = pmc_core_reg_read_byte(pmcdev,
- pmcdev->map->lpm_priority_offset + (mode / 2));
- int pri0 = GENMASK(3, 0) & priority;
- int pri1 = (GENMASK(7, 4) & priority) >> 4;
+ /* Read 32 bit LPM_PRI register */
+ lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset);
- lpm_priority[pri0] = mode;
- lpm_priority[pri1] = mode + 1;
- }
/*
- * Loop though all modes from lowest to highest priority,
+ * If lpm_pri value passes verification, then override the default
+ * modes here. Otherwise stick with the default.
+ */
+ if (pmc_core_pri_verify(lpm_pri, mode_order))
+ /* Get list of modes in priority order */
+ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
+ pri_order[mode_order[mode]] = mode;
+ else
+ dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");
+
+ /*
+ * Loop through all modes from lowest to highest priority,
* and capture all enabled modes in order
*/
i = 0;
for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
- int mode = lpm_priority[p];
+ int mode = pri_order[p];
if (!(BIT(mode) & lpm_en))
continue;
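As a worked example of the nibble decoding above (standalone and purely illustrative), the hypothetical LPM_PRI value 0x01346257 decodes to the same ordering as the LPM_DEFAULT_PRI table { 7, 6, 2, 5, 4, 1, 3, 0 }:

	/* Standalone sketch of the LPM_PRI nibble decoding */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t lpm_pri = 0x01346257;	/* hypothetical register value */
		int mode_order[8], pri_order[8];
		int i, mode, p;

		for (i = 0; i < 8; i++) {	/* one nibble per mode */
			mode_order[i] = lpm_pri & 0xf;
			lpm_pri >>= 4;
		}
		for (mode = 0; mode < 8; mode++)
			pri_order[mode_order[mode]] = mode;

		for (p = 7; p >= 0; p--)	/* lowest to highest priority */
			printf("priority level %d -> mode %d\n", p, pri_order[p]);
		return 0;
	}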
@@ -1574,6 +1911,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map),
{}
};
@@ -1675,17 +2013,17 @@ static int pmc_core_probe(struct platform_device *pdev)
mutex_init(&pmcdev->lock);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
- pmc_core_get_low_power_modes(pmcdev);
+ pmc_core_get_low_power_modes(pdev);
pmc_core_do_dmi_quirks(pmcdev);
if (pmcdev->map == &tgl_reg_map)
pmc_core_get_tgl_lpm_reqs(pdev);
/*
- * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
- * a cable is attached. Tell the PMC to ignore it.
+ * On TGL and ADL, due to a hardware limitation, the GBE LTR blocks PC10
+ * when a cable is attached. Tell the PMC to ignore it.
*/
- if (pmcdev->map == &tgl_reg_map) {
+ if (pmcdev->map == &tgl_reg_map || pmcdev->map == &adl_reg_map) {
dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
pmc_core_send_ltr_ignore(pmcdev, 3);
}
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel/pmc/core.h
index e8dae9c6c45f..a46d3b53bf61 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -188,6 +188,8 @@ enum ppfear_regs {
#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
#define LPM_MAX_NUM_MODES 8
+#define LPM_DEFAULT_PRI { 7, 6, 2, 5, 4, 1, 3, 0 }
+
#define GET_X2_COUNTER(v) ((v) >> 1)
#define LPM_STS_LATCH_MODE BIT(31)
@@ -197,6 +199,10 @@ enum ppfear_regs {
#define TGL_NUM_IP_IGN_ALLOWED 23
#define TGL_PMC_LPM_RES_COUNTER_STEP_X2 61 /* 30.5us * 2 */
+#define ADL_PMC_LTR_SPF 0x1C00
+#define ADL_NUM_IP_IGN_ALLOWED 23
+#define ADL_PMC_SLP_S0_RES_COUNTER_OFFSET 0x1098
+
/*
* Tigerlake Power Management Controller register offsets
*/
@@ -218,6 +224,18 @@ enum ppfear_regs {
/* Extended Test Mode Register LPM bits (TGL and later) */
#define ETR3_CLEAR_LPM_EVENTS BIT(28)
+/* Alder Lake Power Management Controller register offsets */
+#define ADL_LPM_EN_OFFSET 0x179C
+#define ADL_LPM_RESIDENCY_OFFSET 0x17A4
+#define ADL_LPM_NUM_MODES 2
+#define ADL_LPM_NUM_MAPS 14
+
+/* Alder Lake Low Power Mode debug registers */
+#define ADL_LPM_STATUS_OFFSET 0x170C
+#define ADL_LPM_PRI_OFFSET 0x17A0
+#define ADL_LPM_STATUS_LATCH_EN_OFFSET 0x1704
+#define ADL_LPM_LIVE_STATUS_OFFSET 0x1764
+
const char *pmc_lpm_modes[] = {
"S0i2.0",
"S0i2.1",
@@ -277,6 +295,7 @@ struct pmc_reg_map {
const u32 pm_vric1_offset;
/* Low Power Mode registers */
const int lpm_num_maps;
+ const int lpm_num_modes;
const int lpm_res_counter_step_x2;
const u32 lpm_sts_latch_en_offset;
const u32 lpm_en_offset;
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index 73797680b895..73797680b895 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
new file mode 100644
index 000000000000..d630f883a717
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel Platform Monitoring Technology drivers
+#
+
+config INTEL_PMT_CLASS
+ tristate
+ help
+ The Intel Platform Monitoring Technology (PMT) class driver provides
+ the basic sysfs interface and file hierarchy used by PMT devices.
+
+ For more information, see:
+ <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_class.
+
+config INTEL_PMT_TELEMETRY
+ tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
+ depends on MFD_INTEL_PMT
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
+ access to hardware telemetry metrics on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_telemetry.
+
+config INTEL_PMT_CRASHLOG
+ tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
+ depends on MFD_INTEL_PMT
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+ access to hardware crashlog capabilities on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_crashlog.
diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile
new file mode 100644
index 000000000000..279e158c7c23
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/pmt
+# Intel Platform Monitoring Technology Drivers
+#
+
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt_class.o
+pmt_class-y := class.o
+obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o
+pmt_telemetry-y := telemetry.o
+obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o
+pmt_crashlog-y := crashlog.o
diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel/pmt/class.c
index c86ff15b1ed5..659b1073033c 100644
--- a/drivers/platform/x86/intel_pmt_class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -13,7 +13,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
-#include "intel_pmt_class.h"
+#include "class.h"
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
diff --git a/drivers/platform/x86/intel_pmt_class.h b/drivers/platform/x86/intel/pmt/class.h
index 1337019c2873..1337019c2873 100644
--- a/drivers/platform/x86/intel_pmt_class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 56963ceb6345..1c1021f04d3c 100644
--- a/drivers/platform/x86/intel_pmt_crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -15,7 +15,7 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "intel_pmt_class.h"
+#include "class.h"
#define DRV_NAME "pmt_crashlog"
diff --git a/drivers/platform/x86/intel_pmt_telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 9b95ef050457..38d52651c572 100644
--- a/drivers/platform/x86/intel_pmt_telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -15,7 +15,7 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "intel_pmt_class.h"
+#include "class.h"
#define TELEM_DEV_NAME "pmt_telemetry"
@@ -61,6 +61,14 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
/* Size is measured in DWORDS, but accessor returns bytes */
header->size = TELEM_SIZE(readl(disc_table));
+ /*
+ * Some devices may expose non-functioning entries that are
+ * reserved for future use. They have zero size. Do not fail
+ * probe for these. Just ignore them.
+ */
+ if (header->size == 0)
+ return 1;
+
return 0;
}
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index f58b8543f6ac..f58b8543f6ac 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel/rst.c
index 3b81cb896fed..3b81cb896fed 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel/rst.c
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index 64c2dc93472f..64c2dc93472f 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
diff --git a/drivers/platform/x86/intel_speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig
index ce3e3dc076d2..ce3e3dc076d2 100644
--- a/drivers/platform/x86/intel_speed_select_if/Kconfig
+++ b/drivers/platform/x86/intel/speed_select_if/Kconfig
diff --git a/drivers/platform/x86/intel_speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile
index 856076206f35..856076206f35 100644
--- a/drivers/platform/x86/intel_speed_select_if/Makefile
+++ b/drivers/platform/x86/intel/speed_select_if/Makefile
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 6f0cc679c8e5..c9a85eb2e860 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -265,9 +265,9 @@ static int isst_if_get_platform_info(void __user *argp)
{
struct isst_if_platform_info info;
- info.api_version = ISST_IF_API_VERSION,
- info.driver_version = ISST_IF_DRIVER_VERSION,
- info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT,
+ info.api_version = ISST_IF_API_VERSION;
+ info.driver_version = ISST_IF_DRIVER_VERSION;
+ info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
@@ -379,6 +379,8 @@ static int isst_if_cpu_online(unsigned int cpu)
u64 data;
int ret;
+ isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
+
ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
if (ret) {
/* This is not a fatal error on MSR mailbox only I/F */
@@ -397,7 +399,6 @@ static int isst_if_cpu_online(unsigned int cpu)
return ret;
}
isst_cpu_info[cpu].punit_cpu_id = data;
- isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
isst_restore_msr_local(cpu);
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index fdecdae248d7..fdecdae248d7 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
index 1b6eab071068..1b6eab071068 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
index df1fc6c719f3..df1fc6c719f3 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
index ff49025ec085..ff49025ec085 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
diff --git a/drivers/platform/x86/intel/telemetry/Kconfig b/drivers/platform/x86/intel/telemetry/Kconfig
new file mode 100644
index 000000000000..da887bd03731
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_TELEMETRY
+ tristate "Intel SoC Telemetry driver"
+ depends on X86_64
+ depends on MFD_INTEL_PMC_BXT
+ depends on INTEL_PUNIT_IPC
+ help
+ This driver provides interfaces to configure and use
+ telemetry for Intel SoCs from Apollo Lake onwards.
+ It is also used to get various SoC events and parameters
+ directly via debugfs files. Various tools may use
+ this interface for SoC state monitoring.
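As a usage illustration, the debugfs files can simply be read as text; the soc_states file name below is an assumption about the driver's debugfs layout, and debugfs must be mounted:

	/* Userspace sketch: dump a telemetry debugfs file */
	#include <stdio.h>

	int main(void)
	{
		/* Assumed path; requires debugfs mounted and root */
		FILE *f = fopen("/sys/kernel/debug/telemetry/soc_states", "r");
		char line[256];

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}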
diff --git a/drivers/platform/x86/intel/telemetry/Makefile b/drivers/platform/x86/intel/telemetry/Makefile
new file mode 100644
index 000000000000..bfdba5b6c59a
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_telemetry_core-y := core.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o
+intel_telemetry_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_pltdrv.o
+intel_telemetry_debugfs-y := debugfs.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_debugfs.o
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel/telemetry/core.c
index fdf55b5d6948..fdf55b5d6948 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel/telemetry/core.c
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
index 1d4d0fbfd63c..1d4d0fbfd63c 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
index 405dea87de6b..405dea87de6b 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
index 892140b62898..892140b62898 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel/turbo_max_3.c
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency.c
index 3ee4c5c8a64f..3ee4c5c8a64f 100644
--- a/drivers/platform/x86/intel-uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency.c
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 309166431063..15f013af9e62 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
-#include "dual_accel_detect.h"
+#include "../dual_accel_detect.h"
/* Returned when NOT in tablet mode on some HP Stream x360 11 models */
#define VGBS_TABLET_MODE_FLAG_ALT 0x10
diff --git a/drivers/platform/x86/intel/wmi/Kconfig b/drivers/platform/x86/intel/wmi/Kconfig
new file mode 100644
index 000000000000..8e159f712179
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_WMI
+ bool
+
+config INTEL_WMI_SBL_FW_UPDATE
+ tristate "Intel WMI Slim Bootloader firmware update signaling driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface to signal
+ Slim Bootloader to trigger an update on the next reboot.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-sbl-fw-update.
+
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
diff --git a/drivers/platform/x86/intel/wmi/Makefile b/drivers/platform/x86/intel/wmi/Makefile
new file mode 100644
index 000000000000..c2d56d25dea0
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel-wmi-sbl-fw-update-y := sbl-fw-update.o
+obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
+intel-wmi-thunderbolt-y := thunderbolt.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
diff --git a/drivers/platform/x86/intel-wmi-sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 3c86e0108a24..3c86e0108a24 100644
--- a/drivers/platform/x86/intel-wmi-sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index 4ae87060d18b..4ae87060d18b 100644
--- a/drivers/platform/x86/intel-wmi-thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 9171a46a9e3f..bfa0cc20750d 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -457,7 +457,7 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
/**
- * intel_scu_ipc_command_with_size() - Command with data
+ * intel_scu_ipc_dev_command_with_size() - Command with data
* @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 20145b539335..3e520d5bca07 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kernel.h>
@@ -16,11 +17,12 @@
#include <linux/platform_device.h>
#include <linux/types.h>
-#define LED_DEVICE(_name, max) struct led_classdev _name = { \
+#define LED_DEVICE(_name, max, flag) struct led_classdev _name = { \
.name = __stringify(_name), \
.max_brightness = max, \
.brightness_set = _name##_set, \
.brightness_get = _name##_get, \
+ .flags = flag, \
}
MODULE_AUTHOR("Matan Ziv-Av");
@@ -69,9 +71,13 @@ static u32 inited;
#define INIT_INPUT_ACPI 0x04
#define INIT_SPARSE_KEYMAP 0x80
+static int battery_limit_use_wmbb;
+static struct led_classdev kbd_backlight;
+static enum led_brightness get_kbd_backlight_level(void);
+
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
- {KE_KEY, 0x74, {KEY_F13} }, /* Touchpad toggle (F5) */
+ {KE_KEY, 0x74, {KEY_F21} }, /* Touchpad toggle (F5) */
{KE_KEY, 0xf020000, {KEY_F14} }, /* Read mode (F9) */
{KE_KEY, 0x10000000, {KEY_F16} },/* Keyboard backlight (F8) - pressing
* this key both sends an event and
@@ -214,10 +220,16 @@ static void wmi_notify(u32 value, void *context)
int eventcode = obj->integer.value;
struct key_entry *key;
- key =
- sparse_keymap_entry_from_scancode(wmi_input_dev, eventcode);
- if (key && key->type == KE_KEY)
- sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+ if (eventcode == 0x10000000) {
+ led_classdev_notify_brightness_hw_changed(
+ &kbd_backlight, get_kbd_backlight_level());
+ } else {
+ key = sparse_keymap_entry_from_scancode(
+ wmi_input_dev, eventcode);
+ if (key && key->type == KE_KEY)
+ sparse_keymap_report_entry(wmi_input_dev,
+ key, 1, true);
+ }
}
pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type,
@@ -461,7 +473,10 @@ static ssize_t battery_care_limit_store(struct device *dev,
if (value == 100 || value == 80) {
union acpi_object *r;
- r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ if (battery_limit_use_wmbb)
+ r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
+ else
+ r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
if (!r)
return -EIO;
@@ -479,16 +494,29 @@ static ssize_t battery_care_limit_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
- if (!r)
- return -EIO;
+ if (battery_limit_use_wmbb) {
+ r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
- if (r->type != ACPI_TYPE_INTEGER) {
- kfree(r);
- return -EIO;
- }
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
- status = r->integer.value;
+ status = r->buffer.pointer[0x10];
+ } else {
+ r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->integer.value;
+ }
kfree(r);
if (status != 80 && status != 100)
status = 0;
@@ -529,7 +557,7 @@ static enum led_brightness tpad_led_get(struct led_classdev *cdev)
return ggov(GOV_TLED) > 0 ? LED_ON : LED_OFF;
}
-static LED_DEVICE(tpad_led, 1);
+static LED_DEVICE(tpad_led, 1, 0);
static void kbd_backlight_set(struct led_classdev *cdev,
enum led_brightness brightness)
@@ -546,7 +574,7 @@ static void kbd_backlight_set(struct led_classdev *cdev,
kfree(r);
}
-static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+static enum led_brightness get_kbd_backlight_level(void)
{
union acpi_object *r;
int val;
@@ -577,7 +605,12 @@ static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
return val;
}
-static LED_DEVICE(kbd_backlight, 255);
+static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+{
+ return get_kbd_backlight_level();
+}
+
+static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
static void wmi_input_destroy(void)
{
@@ -602,6 +635,8 @@ static struct platform_driver pf_driver = {
static int acpi_add(struct acpi_device *device)
{
int ret;
+ const char *product;
+ int year = 2017;
if (pf_device)
return 0;
@@ -619,6 +654,42 @@ static int acpi_add(struct acpi_device *device)
pr_err("unable to register platform device\n");
goto out_platform_registered;
}
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (product && strlen(product) > 4)
+ switch (product[4]) {
+ case '5':
+ case '6':
+ year = 2016;
+ break;
+ case '7':
+ year = 2017;
+ break;
+ case '8':
+ year = 2018;
+ break;
+ case '9':
+ year = 2019;
+ break;
+ case '0':
+ if (strlen(product) > 5)
+ switch (product[5]) {
+ case 'N':
+ year = 2020;
+ break;
+ case 'P':
+ year = 2021;
+ break;
+ default:
+ year = 2022;
+ }
+ break;
+ default:
+ year = 2019;
+ }
+ pr_info("product: %s year: %d\n", product, year);
+
+ if (year >= 2019)
+ battery_limit_use_wmbb = 1;
ret = sysfs_create_group(&pf_device->dev.kobj, &dev_attribute_group);
if (ret)
diff --git a/drivers/platform/x86/meraki-mx100.c b/drivers/platform/x86/meraki-mx100.c
new file mode 100644
index 000000000000..3751ed36a980
--- /dev/null
+++ b/drivers/platform/x86/meraki-mx100.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Cisco Meraki MX100 (Tinkerbell) board platform driver
+ *
+ * Based off of arch/x86/platform/meraki/tink.c from the
+ * Meraki GPL release meraki-firmware-sources-r23-20150601
+ *
+ * Format inspired by platform/x86/pcengines-apuv2.c
+ *
+ * Copyright (C) 2021 Chris Blake <chrisrblake93@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define TINK_GPIO_DRIVER_NAME "gpio_ich"
+
+/* LEDs */
+static const struct gpio_led tink_leds[] = {
+ {
+ .name = "mx100:green:internet",
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "mx100:green:lan2",
+ },
+ {
+ .name = "mx100:green:lan3",
+ },
+ {
+ .name = "mx100:green:lan4",
+ },
+ {
+ .name = "mx100:green:lan5",
+ },
+ {
+ .name = "mx100:green:lan6",
+ },
+ {
+ .name = "mx100:green:lan7",
+ },
+ {
+ .name = "mx100:green:lan8",
+ },
+ {
+ .name = "mx100:green:lan9",
+ },
+ {
+ .name = "mx100:green:lan10",
+ },
+ {
+ .name = "mx100:green:lan11",
+ },
+ {
+ .name = "mx100:green:ha",
+ },
+ {
+ .name = "mx100:orange:ha",
+ },
+ {
+ .name = "mx100:green:usb",
+ },
+ {
+ .name = "mx100:orange:usb",
+ },
+};
+
+static const struct gpio_led_platform_data tink_leds_pdata = {
+ .num_leds = ARRAY_SIZE(tink_leds),
+ .leds = tink_leds,
+};
+
+static struct gpiod_lookup_table tink_leds_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 11,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 18,
+ NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 20,
+ NULL, 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 22,
+ NULL, 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 23,
+ NULL, 4, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 32,
+ NULL, 5, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 34,
+ NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 35,
+ NULL, 7, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 36,
+ NULL, 8, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 37,
+ NULL, 9, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 48,
+ NULL, 10, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 16,
+ NULL, 11, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 7,
+ NULL, 12, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 21,
+ NULL, 13, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 19,
+ NULL, 14, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* Reset Button */
+static struct gpio_keys_button tink_buttons[] = {
+ {
+ .desc = "Reset",
+ .type = EV_KEY,
+ .code = KEY_RESTART,
+ .active_low = 1,
+ .debounce_interval = 100,
+ },
+};
+
+static const struct gpio_keys_platform_data tink_buttons_pdata = {
+ .buttons = tink_buttons,
+ .nbuttons = ARRAY_SIZE(tink_buttons),
+ .poll_interval = 20,
+ .rep = 0,
+ .name = "mx100-keys",
+};
+
+static struct gpiod_lookup_table tink_keys_table = {
+ .dev_id = "gpio-keys-polled",
+ .table = {
+ GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 60,
+ NULL, 0, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ }
+};
+
+/* Board setup */
+static const struct dmi_system_id tink_systems[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Cisco"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MX100-HW"),
+ },
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(dmi, tink_systems);
+
+static struct platform_device *tink_leds_pdev;
+static struct platform_device *tink_keys_pdev;
+
+static struct platform_device * __init tink_create_dev(
+ const char *name, const void *pdata, size_t sz)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(NULL,
+ name, PLATFORM_DEVID_NONE, pdata, sz);
+ if (IS_ERR(pdev))
+ pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev));
+
+ return pdev;
+}
+
+static int __init tink_board_init(void)
+{
+ int ret;
+
+ if (!dmi_first_match(tink_systems))
+ return -ENODEV;
+
+ /*
+ * GPIO60 defaults to native mode but serves as our Reset Button, so make
+ * sure it is switched to GPIO mode by writing to GPIO_USE_SEL2. This is
+ * documented on page 1609 of the PCH datasheet, order number 327879-005US.
+ */
+ outl(inl(0x530) | BIT(28), 0x530);
+
+ gpiod_add_lookup_table(&tink_leds_table);
+ gpiod_add_lookup_table(&tink_keys_table);
+
+ tink_leds_pdev = tink_create_dev("leds-gpio",
+ &tink_leds_pdata, sizeof(tink_leds_pdata));
+ if (IS_ERR(tink_leds_pdev)) {
+ ret = PTR_ERR(tink_leds_pdev);
+ goto err;
+ }
+
+ tink_keys_pdev = tink_create_dev("gpio-keys-polled",
+ &tink_buttons_pdata, sizeof(tink_buttons_pdata));
+ if (IS_ERR(tink_keys_pdev)) {
+ ret = PTR_ERR(tink_keys_pdev);
+ platform_device_unregister(tink_leds_pdev);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpiod_remove_lookup_table(&tink_keys_table);
+ gpiod_remove_lookup_table(&tink_leds_table);
+ return ret;
+}
+module_init(tink_board_init);
+
+static void __exit tink_board_exit(void)
+{
+ platform_device_unregister(tink_keys_pdev);
+ platform_device_unregister(tink_leds_pdev);
+ gpiod_remove_lookup_table(&tink_keys_table);
+ gpiod_remove_lookup_table(&tink_leds_table);
+}
+module_exit(tink_board_exit);
+
+MODULE_AUTHOR("Chris Blake <chrisrblake93@gmail.com>");
+MODULE_DESCRIPTION("Cisco Meraki MX100 Platform Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:meraki-mx100");
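A note on the new board file above: each GPIO_LOOKUP_IDX() entry binds one line of the "gpio_ich" chip to a consumer index of the device named in .dev_id, which is how the generic leds-gpio and gpio-keys-polled drivers find their lines without any board-specific gpiolib calls. A minimal consumer-side sketch of what one lookup resolves to (hypothetical pdev variable; this mirrors what leds-gpio effectively does per LED):

	/* Index 2 maps to gpio_ich line 20, i.e. "mx100:green:lan3". */
	struct gpio_desc *desc = devm_gpiod_get_index(&pdev->dev, NULL, 2,
						      GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);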
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 6cfed4427fb0..9472aae72df2 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -20,6 +20,10 @@
#include "firmware_attributes_class.h"
#include "think-lmi.h"
+static bool debug_support;
+module_param(debug_support, bool, 0444);
+MODULE_PARM_DESC(debug_support, "Enable debug command support");
+
/*
* Name:
* Lenovo_BiosSetting
@@ -116,6 +120,14 @@
*/
#define LENOVO_GET_BIOS_SELECTIONS_GUID "7364651A-132F-4FE7-ADAA-40C6C7EE2E3B"
+/*
+ * Name:
+ * Lenovo_DebugCmdGUID
+ * Description
+ * Debug entry GUID method for entering debug commands to the BIOS
+ */
+#define LENOVO_DEBUG_CMD_GUID "7FF47003-3B6C-4E5E-A227-E979824A85D1"
+
#define TLMI_POP_PWD (1 << 0)
#define TLMI_PAP_PWD (1 << 1)
#define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
@@ -660,6 +672,64 @@ static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *
static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+/* ---- Debug interface --------------------------------------------------------- */
+static ssize_t debug_cmd_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *set_str = NULL, *new_setting = NULL;
+ char *auth_str = NULL;
+ char *p;
+ int ret;
+
+ if (!tlmi_priv.can_debug_cmd)
+ return -EOPNOTSUPP;
+
+ new_setting = kstrdup(buf, GFP_KERNEL);
+ if (!new_setting)
+ return -ENOMEM;
+
+ /* Strip out a trailing newline if one is present */
+ p = strchrnul(new_setting, '\n');
+ *p = '\0';
+
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
+ tlmi_priv.pwd_admin->password,
+ encoding_options[tlmi_priv.pwd_admin->encoding],
+ tlmi_priv.pwd_admin->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (auth_str)
+ set_str = kasprintf(GFP_KERNEL, "%s,%s", new_setting, auth_str);
+ else
+ set_str = kasprintf(GFP_KERNEL, "%s;", new_setting);
+ if (!set_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = tlmi_simple_call(LENOVO_DEBUG_CMD_GUID, set_str);
+ if (ret)
+ goto out;
+
+ if (!tlmi_priv.pending_changes) {
+ tlmi_priv.pending_changes = true;
+ /* let userland know it may need to check reboot pending again */
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
+out:
+ kfree(auth_str);
+ kfree(set_str);
+ kfree(new_setting);
+ return ret ?: count;
+}
+
+static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd);
+
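For completeness, a small userspace sketch that drives the new write-only attribute; the sysfs path is assumed from the firmware-attributes class registration ("thinklmi"), and the module must be loaded with debug_support=1 for the file to exist:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int send_debug_cmd(const char *cmd)
	{
		/* Assumed path; the attribute sits in the thinklmi attributes kset. */
		int fd = open("/sys/class/firmware-attributes/thinklmi/attributes/debug_cmd",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, cmd, strlen(cmd));
		close(fd);
		return n < 0 ? -1 : 0;
	}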
/* ---- Initialisation --------------------------------------------------------- */
static void tlmi_release_attr(void)
{
@@ -673,6 +743,8 @@ static void tlmi_release_attr(void)
}
}
sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ if (tlmi_priv.can_debug_cmd && debug_support)
+ sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
kset_unregister(tlmi_priv.attribute_kset);
/* Authentication structures */
@@ -737,6 +809,11 @@ static int tlmi_sysfs_init(void)
if (ret)
goto fail_create_attr;
+ if (tlmi_priv.can_debug_cmd && debug_support) {
+ ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
+ if (ret)
+ goto fail_create_attr;
+ }
/* Create authentication entries */
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
&tlmi_priv.class_dev->kobj);
@@ -793,6 +870,9 @@ static int tlmi_analyze(void)
if (wmi_has_guid(LENOVO_BIOS_PASSWORD_SETTINGS_GUID))
tlmi_priv.can_get_password_settings = true;
+ if (wmi_has_guid(LENOVO_DEBUG_CMD_GUID))
+ tlmi_priv.can_debug_cmd = true;
+
/*
* Try to find the number of valid settings of this machine
* and use it to create sysfs attributes.
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index eb598846628a..f8e26823075f 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -61,6 +61,7 @@ struct think_lmi {
bool can_set_bios_password;
bool can_get_password_settings;
bool pending_changes;
+ bool can_debug_cmd;
struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
struct device *class_dev;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index c76adedd58c9..aa29841bbb79 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -272,7 +272,7 @@ config PWM_IQS620A
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select MFD_SYSCON
help
@@ -284,7 +284,8 @@ config PWM_JZ4740
config PWM_KEEMBAY
tristate "Intel Keem Bay PWM driver"
- depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
help
The platform driver for Intel Keem Bay PWM controller.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 35e894f4a379..4527f09a5c50 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_remove(struct pwm_chip *chip)
+void pwmchip_remove(struct pwm_chip *chip)
{
pwmchip_sysfs_unexport(chip);
@@ -318,8 +318,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
mutex_unlock(&pwm_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(pwmchip_remove);
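With pwmchip_remove() now returning void, a driver's remove callback no longer has an error code to propagate and can always return 0 after its own cleanup. The bulk of the hunks below are this mechanical conversion; a minimal sketch of the resulting idiom (hypothetical foo_pwm driver, not part of this series):

	static int foo_pwm_remove(struct platform_device *pdev)
	{
		struct foo_pwm *priv = platform_get_drvdata(pdev);

		pwmchip_remove(&priv->chip);	/* returns void, nothing to check */
		clk_disable_unprepare(priv->clk);

		return 0;
	}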
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index e2a26d9da25b..ad37bc46f272 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -22,14 +22,21 @@
struct ab8500_pwm_chip {
struct pwm_chip chip;
+ unsigned int hwid;
};
+static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ab8500_pwm_chip, chip);
+}
+
static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int ret;
u8 reg;
unsigned int higher_val, lower_val;
+ struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -37,7 +44,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 0);
+ 1 << ab8500->hwid, 0);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
@@ -56,7 +63,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
higher_val = ((state->duty_cycle & 0x0300) >> 8);
- reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
+ reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
reg, (u8)lower_val);
@@ -70,7 +77,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 1 << (chip->base - 1));
+ 1 << ab8500->hwid, 1 << ab8500->hwid);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
@@ -88,6 +95,9 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
struct ab8500_pwm_chip *ab8500;
int err;
+ if (pdev->id < 1 || pdev->id > 31)
+ return dev_err_probe(&pdev->dev, -EINVAL, "Invalid device id %d\n", pdev->id);
+
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
@@ -99,27 +109,13 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
ab8500->chip.npwm = 1;
+ ab8500->hwid = pdev->id - 1;
- err = pwmchip_add(&ab8500->chip);
+ err = devm_pwmchip_add(&pdev->dev, &ab8500->chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
dev_dbg(&pdev->dev, "pwm probe successful\n");
- platform_set_drvdata(pdev, ab8500);
-
- return 0;
-}
-
-static int ab8500_pwm_remove(struct platform_device *pdev)
-{
- struct ab8500_pwm_chip *ab8500 = platform_get_drvdata(pdev);
- int err;
-
- err = pwmchip_remove(&ab8500->chip);
- if (err < 0)
- return err;
-
- dev_dbg(&pdev->dev, "pwm driver removed\n");
return 0;
}
@@ -129,7 +125,6 @@ static struct platform_driver ab8500_pwm_driver = {
.name = "ab8500-pwm",
},
.probe = ab8500_pwm_probe,
- .remove = ab8500_pwm_remove,
};
module_platform_driver(ab8500_pwm_driver);
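The ab8500 conversion above also switches to devm_pwmchip_add(), which unregisters the chip automatically on unbind, so both the .remove() callback and platform_set_drvdata() become unnecessary. A minimal sketch of the pattern with hypothetical foo names (the real signature is devm_pwmchip_add(struct device *, struct pwm_chip *)):

	static int foo_pwm_probe(struct platform_device *pdev)
	{
		struct foo_pwm *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->chip.dev = &pdev->dev;
		priv->chip.ops = &foo_pwm_ops;
		priv->chip.npwm = 1;

		/* Torn down automatically on driver unbind. */
		return devm_pwmchip_add(&pdev->dev, &priv->chip);
	}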
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 4459325d3650..a43b2babc809 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -281,11 +281,8 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->hlcdc->periph_clk);
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index bf398f21484d..36f7ea381838 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -503,11 +503,8 @@ err_slow_clk:
static int atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
- int err;
- err = pwmchip_remove(&tcbpwm->chip);
- if (err < 0)
- return err;
+ pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->slow_clk);
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index a8162bae3e8a..e748604403cc 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -84,9 +84,19 @@ struct atmel_pwm_chip {
void __iomem *base;
const struct atmel_pwm_data *data;
- unsigned int updated_pwms;
- /* ISR is cleared when read, ensure only one thread does that */
- struct mutex isr_lock;
+ /*
+ * The hardware supports a mechanism to update a channel's duty cycle at
+ * the end of the currently running period. When such an update is
+ * pending we delay disabling the PWM until the new configuration is
+ * active because otherwise pwm_config(duty_cycle=0); pwm_disable();
+ * might not result in an inactive output.
+ * This bitmask tracks for which channels an update is pending in
+ * hardware.
+ */
+ u32 update_pending;
+
+ /* Protects .update_pending */
+ spinlock_t lock;
};
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
@@ -123,6 +133,64 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
atmel_pwm_writel(chip, base + offset, val);
}
+static void atmel_pwm_update_pending(struct atmel_pwm_chip *chip)
+{
+ /*
+ * Each channel that has its bit in ISR set started a new period since
+ * ISR was cleared and so there is no more update pending. Note that
+ * reading ISR clears it, so this needs to handle all channels to not
+ * loose information.
+ */
+ u32 isr = atmel_pwm_readl(chip, PWM_ISR);
+
+ chip->update_pending &= ~isr;
+}
+
+static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ spin_lock(&chip->lock);
+
+ /*
+ * Clear pending flags in hardware because otherwise there might still
+ * be a stale flag in ISR.
+ */
+ atmel_pwm_update_pending(chip);
+
+ chip->update_pending |= (1 << ch);
+
+ spin_unlock(&chip->lock);
+}
+
+static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ int ret = 0;
+
+ spin_lock(&chip->lock);
+
+ if (chip->update_pending & (1 << ch)) {
+ atmel_pwm_update_pending(chip);
+
+ if (chip->update_pending & (1 << ch))
+ ret = 1;
+ }
+
+ spin_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int atmel_pwm_wait_nonpending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ unsigned long timeout = jiffies + 2 * HZ;
+ int ret;
+
+ while ((ret = atmel_pwm_test_pending(chip, ch)) &&
+ time_before(jiffies, timeout))
+ usleep_range(10, 100);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
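Because a read of PWM_ISR clears the bits for every channel at once, each read must be folded into the cached bitmask rather than polling one channel in isolation. An illustrative walk-through with made-up values:

	/*
	 * Suppose channels 0..2 have updates queued: update_pending == 0b0111.
	 * A read of PWM_ISR returns 0b0101, i.e. channels 0 and 2 have started
	 * a new period, so update_pending &= ~0b0101 leaves 0b0010: only
	 * channel 1 is still waiting for its new duty cycle to take effect.
	 */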
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
unsigned long clkrate,
const struct pwm_state *state,
@@ -185,6 +253,7 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty_upd, cdty);
+ atmel_pwm_set_pending(atmel_pwm, pwm->hwpwm);
}
static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
@@ -205,20 +274,8 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- unsigned long timeout = jiffies + 2 * HZ;
- /*
- * Wait for at least a complete period to have passed before disabling a
- * channel to be sure that CDTY has been updated
- */
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
-
- while (!(atmel_pwm->updated_pwms & (1 << pwm->hwpwm)) &&
- time_before(jiffies, timeout)) {
- usleep_range(10, 100);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- }
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
/*
@@ -292,10 +349,6 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val |= PWM_CMR_CPOL;
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
} else if (cstate.enabled) {
atmel_pwm_disable(chip, pwm, true);
@@ -326,6 +379,9 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
tmp <<= pres;
state->period = DIV64_U64_ROUND_UP(tmp, rate);
+ /* Wait for an updated duty_cycle queued in hardware */
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
+
cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty);
tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
@@ -416,9 +472,10 @@ static int atmel_pwm_probe(struct platform_device *pdev)
if (!atmel_pwm)
return -ENOMEM;
- mutex_init(&atmel_pwm->isr_lock);
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
- atmel_pwm->updated_pwms = 0;
+
+ atmel_pwm->update_pending = 0;
+ spin_lock_init(&atmel_pwm->lock);
atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pwm->base))
@@ -460,7 +517,6 @@ static int atmel_pwm_remove(struct platform_device *pdev)
pwmchip_remove(&atmel_pwm->chip);
clk_unprepare(atmel_pwm->clk);
- mutex_destroy(&atmel_pwm->isr_lock);
return 0;
}
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 8c85c66ea5c9..64148f5f81d0 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -267,8 +267,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
if (kp == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, kp);
-
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.npwm = 6;
@@ -298,20 +296,13 @@ static int kona_pwmc_probe(struct platform_device *pdev)
clk_disable_unprepare(kp->clk);
- ret = pwmchip_add(&kp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
-static int kona_pwmc_remove(struct platform_device *pdev)
-{
- struct kona_pwmc *kp = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&kp->chip);
-}
-
static const struct of_device_id bcm_kona_pwmc_dt[] = {
{ .compatible = "brcm,kona-pwm" },
{ },
@@ -324,7 +315,6 @@ static struct platform_driver kona_pwmc_driver = {
.of_match_table = bcm_kona_pwmc_dt,
},
.probe = kona_pwmc_probe,
- .remove = kona_pwmc_remove,
};
module_platform_driver(kona_pwmc_driver);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 8b1d1e7aa856..3b529f82b97c 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -282,12 +282,11 @@ out_clk:
static int brcmstb_pwm_remove(struct platform_device *pdev)
{
struct brcmstb_pwm *p = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&p->chip);
+ pwmchip_remove(&p->chip);
clk_disable_unprepare(p->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 9fffb566af5f..5e29d9c682c3 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -280,7 +280,9 @@ static int cros_ec_pwm_remove(struct platform_device *dev)
struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
struct pwm_chip *chip = &ec_pwm->chip;
- return pwmchip_remove(chip);
+ pwmchip_remove(chip);
+
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index fc3cb7d669c6..c45a75e65c86 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -183,27 +183,18 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
ep93xx_pwm->chip.npwm = 1;
- ret = pwmchip_add(&ep93xx_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &ep93xx_pwm->chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, ep93xx_pwm);
return 0;
}
-static int ep93xx_pwm_remove(struct platform_device *pdev)
-{
- struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&ep93xx_pwm->chip);
-}
-
static struct platform_driver ep93xx_pwm_driver = {
.driver = {
.name = "ep93xx-pwm",
},
.probe = ep93xx_pwm_probe,
- .remove = ep93xx_pwm_remove,
};
module_platform_driver(ep93xx_pwm_driver);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 96ccd772280c..0247757f9a72 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -453,7 +453,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.npwm = 8;
- ret = pwmchip_add(&fpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &fpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
@@ -464,13 +464,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
return fsl_pwm_init(fpc);
}
-static int fsl_pwm_remove(struct platform_device *pdev)
-{
- struct fsl_pwm_chip *fpc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&fpc->chip);
-}
-
#ifdef CONFIG_PM_SLEEP
static int fsl_pwm_suspend(struct device *dev)
{
@@ -552,7 +545,6 @@ static struct platform_driver fsl_pwm_driver = {
.pm = &fsl_pwm_pm_ops,
},
.probe = fsl_pwm_probe,
- .remove = fsl_pwm_remove,
};
module_platform_driver(fsl_pwm_driver);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 4a6e9ad3c0ff..333f1b18ff4e 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -248,13 +248,15 @@ static int hibvt_pwm_remove(struct platform_device *pdev)
pwm_chip = platform_get_drvdata(pdev);
+ pwmchip_remove(&pwm_chip->chip);
+
reset_control_assert(pwm_chip->rstc);
msleep(30);
reset_control_deassert(pwm_chip->rstc);
clk_disable_unprepare(pwm_chip->clk);
- return pwmchip_remove(&pwm_chip->chip);
+ return 0;
}
static const struct of_device_id hibvt_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 11b16ecc4f96..f97f82548293 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -326,28 +326,14 @@ err_pm_disable:
static int img_pwm_remove(struct platform_device *pdev)
{
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
- u32 val;
- unsigned int i;
- int ret;
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put(&pdev->dev);
- return ret;
- }
-
- for (i = 0; i < pwm_chip->chip.npwm; i++) {
- val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
- val &= ~BIT(i);
- img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
- }
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_pwm_runtime_suspend(&pdev->dev);
- return pwmchip_remove(&pwm_chip->chip);
+ pwmchip_remove(&pwm_chip->chip);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index dbb50493abdd..e5e7b7c339a8 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -382,11 +382,12 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
static int pwm_imx_tpm_remove(struct platform_device *pdev)
{
struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
- int ret = pwmchip_remove(&tpm->chip);
+
+ pwmchip_remove(&tpm->chip);
clk_disable_unprepare(tpm->clk);
- return ret;
+ return 0;
}
static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index f6588a96fbd9..ea91a2f81a9f 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -313,8 +313,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (imx == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, imx);
-
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
@@ -342,16 +340,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (!(pwmcr & MX3_PWMCR_EN))
pwm_imx27_clk_disable_unprepare(imx);
- return pwmchip_add(&imx->chip);
-}
-
-static int pwm_imx27_remove(struct platform_device *pdev)
-{
- struct pwm_imx27_chip *imx;
-
- imx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&imx->chip);
+ return devm_pwmchip_add(&pdev->dev, &imx->chip);
}
static struct platform_driver imx_pwm_driver = {
@@ -360,7 +349,6 @@ static struct platform_driver imx_pwm_driver = {
.of_match_table = pwm_imx27_dt_ids,
},
.probe = pwm_imx27_probe,
- .remove = pwm_imx27_remove,
};
module_platform_driver(imx_pwm_driver);
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 015f5eba09a1..b66c35074087 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -176,8 +176,6 @@ static int lgm_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- platform_set_drvdata(pdev, pc);
-
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -210,20 +208,13 @@ static int lgm_pwm_probe(struct platform_device *pdev)
lgm_pwm_init(pc);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(dev, &pc->chip);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add PWM chip\n");
return 0;
}
-static int lgm_pwm_remove(struct platform_device *pdev)
-{
- struct lgm_pwm_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct of_device_id lgm_pwm_of_match[] = {
{ .compatible = "intel,lgm-pwm" },
{ }
@@ -236,7 +227,6 @@ static struct platform_driver lgm_pwm_driver = {
.of_match_table = lgm_pwm_of_match,
},
.probe = lgm_pwm_probe,
- .remove = lgm_pwm_remove,
};
module_platform_driver(lgm_pwm_driver);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 6c6e26d18329..54bd95a5cab0 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -189,7 +189,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (!iqs620_pwm)
return -ENOMEM;
- platform_set_drvdata(pdev, iqs620_pwm);
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
@@ -224,31 +223,18 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = pwmchip_add(&iqs620_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
return ret;
}
-static int iqs620_pwm_remove(struct platform_device *pdev)
-{
- struct iqs620_pwm_private *iqs620_pwm = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&iqs620_pwm->chip);
- if (ret)
- dev_err(&pdev->dev, "Failed to remove device: %d\n", ret);
-
- return ret;
-}
-
static struct platform_driver iqs620_pwm_platform_driver = {
.driver = {
.name = "iqs620a-pwm",
},
.probe = iqs620_pwm_probe,
- .remove = iqs620_pwm_remove,
};
module_platform_driver(iqs620_pwm_platform_driver);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 990e7904c7f1..23dc1fb770e2 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -245,16 +245,7 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- platform_set_drvdata(pdev, jz4740);
-
- return pwmchip_add(&jz4740->chip);
-}
-
-static int jz4740_pwm_remove(struct platform_device *pdev)
-{
- struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&jz4740->chip);
+ return devm_pwmchip_add(dev, &jz4740->chip);
}
static const struct soc_info __maybe_unused jz4740_soc_info = {
@@ -280,7 +271,6 @@ static struct platform_driver jz4740_pwm_driver = {
.of_match_table = of_match_ptr(jz4740_pwm_dt_ids),
},
.probe = jz4740_pwm_probe,
- .remove = jz4740_pwm_remove,
};
module_platform_driver(jz4740_pwm_driver);
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index 521a825c8ba0..733811b05721 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -207,22 +207,13 @@ static int keembay_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(dev, &priv->chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
- platform_set_drvdata(pdev, priv);
-
return 0;
}
-static int keembay_pwm_remove(struct platform_device *pdev)
-{
- struct keembay_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->chip);
-}
-
static const struct of_device_id keembay_pwm_of_match[] = {
{ .compatible = "intel,keembay-pwm" },
{ }
@@ -231,7 +222,6 @@ MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
static struct platform_driver keembay_pwm_driver = {
.probe = keembay_pwm_probe,
- .remove = keembay_pwm_remove,
.driver = {
.name = "pwm-keembay",
.of_match_table = keembay_pwm_of_match,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 7551253ada32..ea17d446a627 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -276,16 +276,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
- platform_set_drvdata(pdev, lp3943_pwm);
-
- return pwmchip_add(&lp3943_pwm->chip);
-}
-
-static int lp3943_pwm_remove(struct platform_device *pdev)
-{
- struct lp3943_pwm *lp3943_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lp3943_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
}
#ifdef CONFIG_OF
@@ -298,7 +289,6 @@ MODULE_DEVICE_TABLE(of, lp3943_pwm_of_match);
static struct platform_driver lp3943_pwm_driver = {
.probe = lp3943_pwm_probe,
- .remove = lp3943_pwm_remove,
.driver = {
.name = "lp3943-pwm",
.of_match_table = of_match_ptr(lp3943_pwm_of_match),
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 2834a0f001d3..ddeab5687cb8 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -117,29 +117,20 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
- ret = pwmchip_add(&lpc32xx->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
- return ret;
- }
-
- /* When PWM is disable, configure the output to the default value */
+ /* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
- platform_set_drvdata(pdev, lpc32xx);
+ ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
+ return ret;
+ }
return 0;
}
-static int lpc32xx_pwm_remove(struct platform_device *pdev)
-{
- struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lpc32xx->chip);
-}
-
static const struct of_device_id lpc32xx_pwm_dt_ids[] = {
{ .compatible = "nxp,lpc3220-pwm", },
{ /* sentinel */ }
@@ -152,7 +143,6 @@ static struct platform_driver lpc32xx_pwm_driver = {
.of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
- .remove = lpc32xx_pwm_remove,
};
module_platform_driver(lpc32xx_pwm_driver);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b4a31060bcd7..0d4dd80e9f07 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -253,13 +253,11 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, pc);
-
pc->chip.dev = &pdev->dev;
pc->chip.ops = &pwm_mediatek_ops;
pc->chip.npwm = pc->soc->num_pwms;
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -268,13 +266,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
return 0;
}
-static int pwm_mediatek_remove(struct platform_device *pdev)
-{
- struct pwm_mediatek_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
@@ -335,7 +326,6 @@ static struct platform_driver pwm_mediatek_driver = {
.of_match_table = pwm_mediatek_of_match,
},
.probe = pwm_mediatek_probe,
- .remove = pwm_mediatek_remove,
};
module_platform_driver(pwm_mediatek_driver);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 9b3ba401a3db..c605013e4114 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -5,6 +5,7 @@
* Author: YH Huang <yh.huang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -47,6 +48,7 @@ struct mtk_disp_pwm {
struct clk *clk_main;
struct clk *clk_mm;
void __iomem *base;
+ bool enabled;
};
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
@@ -66,14 +68,47 @@ static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
writel(value, address);
}
-static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
u32 clk_div, period, high_width, value;
u64 div, rate;
int err;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ 0x0);
+
+ if (mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
+
+ mdp->enabled = false;
+ return 0;
+ }
+
+ if (!mdp->enabled) {
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
+ ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return err;
+ }
+ }
+
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
@@ -85,29 +120,24 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* high_width = (PWM_CLK_RATE * duty_ns) / (10^9 * (clk_div + 1))
*/
rate = clk_get_rate(mdp->clk_main);
- clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
+ clk_div = mul_u64_u64_div_u64(state->period, rate, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
- if (clk_div > PWM_CLKDIV_MAX)
+ if (clk_div > PWM_CLKDIV_MAX) {
+ if (!mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
return -EINVAL;
+ }
div = NSEC_PER_SEC * (clk_div + 1);
- period = div64_u64(rate * period_ns, div);
+ period = mul_u64_u64_div_u64(state->period, rate, div);
if (period > 0)
period--;
- high_width = div64_u64(rate * duty_ns, div);
+ high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
- if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
- }
-
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
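To make the conversion above concrete, a worked example with illustrative values (assuming a 26 MHz mdp->clk_main, a 1 ms period and 0.25 ms duty cycle; these numbers are not from the patch):

	clk_div    = (1000000 * 26000000 / 10^9) >> 12 = 26000 >> 12 = 6
	div        = 10^9 * (6 + 1)                    = 7 * 10^9
	period     = 1000000 * 26000000 / div          = 3714, written as 3713
	high_width =  250000 * 26000000 / div          = 928

The intermediate products are now computed with mul_u64_u64_div_u64(), which uses a 128-bit intermediate, so rate * period_ns no longer overflows 64 bits for large periods.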
@@ -122,50 +152,70 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
0x0);
+ } else {
+ /*
+ * For MT2701, disable double buffer before writing register
+ * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
}
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ mdp->data->enable_mask);
+ mdp->enabled = true;
return 0;
}
-static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ u64 rate, period, high_width;
+ u32 clk_div, con0, con1;
int err;
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
+ err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ return;
}
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- mdp->data->enable_mask);
-
- return 0;
-}
-
-static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
-
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- 0x0);
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return;
+ }
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ rate = clk_get_rate(mdp->clk_main);
+ con0 = readl(mdp->base + mdp->data->con0);
+ con1 = readl(mdp->base + mdp->data->con1);
+ state->enabled = !!(con0 & BIT(0));
+ clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
+ period = FIELD_GET(PWM_PERIOD_MASK, con1);
+ /*
+ * period is 12 bits wide, clk_div 11 bits, and NSEC_PER_SEC fits in 30
+ * bits; 12 + 11 + 30 = 53 < 64, so period * (clk_div + 1) * NSEC_PER_SEC
+ * doesn't overflow a u64.
+ */
+ state->period = DIV64_U64_ROUND_UP(period * (clk_div + 1) * NSEC_PER_SEC, rate);
+ high_width = FIELD_GET(PWM_HIGH_WIDTH_MASK, con1);
+ state->duty_cycle = DIV64_U64_ROUND_UP(high_width * (clk_div + 1) * NSEC_PER_SEC,
+ rate);
+ state->polarity = PWM_POLARITY_NORMAL;
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
}
static const struct pwm_ops mtk_disp_pwm_ops = {
- .config = mtk_disp_pwm_config,
- .enable = mtk_disp_pwm_enable,
- .disable = mtk_disp_pwm_disable,
+ .apply = mtk_disp_pwm_apply,
+ .get_state = mtk_disp_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -192,58 +242,28 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
- ret = clk_prepare(mdp->clk_main);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare(mdp->clk_mm);
- if (ret < 0)
- goto disable_clk_main;
-
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- goto disable_clk_mm;
+ dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+ return ret;
}
platform_set_drvdata(pdev, mdp);
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- if (!mdp->data->has_commit) {
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
- }
-
return 0;
-
-disable_clk_mm:
- clk_unprepare(mdp->clk_mm);
-disable_clk_main:
- clk_unprepare(mdp->clk_main);
- return ret;
}
static int mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&mdp->chip);
- clk_unprepare(mdp->clk_mm);
- clk_unprepare(mdp->clk_main);
+ pwmchip_remove(&mdp->chip);
- return ret;
+ return 0;
}
static const struct mtk_pwm_data mt2701_pwm_data = {
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index a22180803bd7..766dbc58dad8 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -145,30 +145,18 @@ static int mxs_pwm_probe(struct platform_device *pdev)
return ret;
}
- ret = pwmchip_add(&mxs->chip);
+ /* FIXME: Only do this if the PWM isn't already running */
+ ret = stmp_reset_block(mxs->base);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
+
+ ret = devm_pwmchip_add(&pdev->dev, &mxs->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, mxs);
-
- ret = stmp_reset_block(mxs->base);
- if (ret)
- goto pwm_remove;
-
return 0;
-
-pwm_remove:
- pwmchip_remove(&mxs->chip);
- return ret;
-}
-
-static int mxs_pwm_remove(struct platform_device *pdev)
-{
- struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&mxs->chip);
}
static const struct of_device_id mxs_pwm_dt_ids[] = {
@@ -183,7 +171,6 @@ static struct platform_driver mxs_pwm_driver = {
.of_match_table = mxs_pwm_dt_ids,
},
.probe = mxs_pwm_probe,
- .remove = mxs_pwm_remove,
};
module_platform_driver(mxs_pwm_driver);
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 50c454c553c4..ab63b081df53 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -150,23 +150,12 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
priv->ec = ec;
priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
-
chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &ntxec_pwm_ops;
- chip->base = -1;
chip->npwm = 1;
- return pwmchip_add(chip);
-}
-
-static int ntxec_pwm_remove(struct platform_device *pdev)
-{
- struct ntxec_pwm *priv = platform_get_drvdata(pdev);
- struct pwm_chip *chip = &priv->chip;
-
- return pwmchip_remove(chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver ntxec_pwm_driver = {
@@ -174,7 +163,6 @@ static struct platform_driver ntxec_pwm_driver = {
.name = "ntxec-pwm",
},
.probe = ntxec_pwm_probe,
- .remove = ntxec_pwm_remove,
};
module_platform_driver(ntxec_pwm_driver);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 507a2d945b90..fa800fcf31d4 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -444,11 +444,8 @@ err_find_timer_pdev:
static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&omap->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&omap->chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 42ed770b432c..c56001a790d0 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -601,11 +601,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
static int pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
- int ret;
- ret = pwmchip_remove(&pca->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pca->chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index e091a528e33c..a9efdcf839ae 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -195,32 +195,21 @@ static int pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pc);
return 0;
}
-static int pwm_remove(struct platform_device *pdev)
-{
- struct pxa_pwm_chip *pc;
-
- pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.of_match_table = pwm_of_match,
},
.probe = pwm_probe,
- .remove = pwm_remove,
.id_table = pwm_id_table,
};
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 043fc32e8be8..579a15240e0a 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -166,8 +166,6 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
rpipwm->chip.base = -1;
rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
- platform_set_drvdata(pdev, rpipwm);
-
ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
&rpipwm->duty_cycle);
if (ret) {
@@ -175,14 +173,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return ret;
}
- return pwmchip_add(&rpipwm->chip);
-}
-
-static int raspberrypi_pwm_remove(struct platform_device *pdev)
-{
- struct raspberrypi_pwm *rpipwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&rpipwm->chip);
+ return devm_pwmchip_add(dev, &rpipwm->chip);
}
static const struct of_device_id raspberrypi_pwm_of_match[] = {
@@ -197,7 +188,6 @@ static struct platform_driver raspberrypi_pwm_driver = {
.of_match_table = raspberrypi_pwm_of_match,
},
.probe = raspberrypi_pwm_probe,
- .remove = raspberrypi_pwm_remove,
};
module_platform_driver(raspberrypi_pwm_driver);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 9daca0c772c7..b437192380e2 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -241,13 +241,12 @@ static int rcar_pwm_probe(struct platform_device *pdev)
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&rcar_pwm->chip);
+ pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
static const struct of_device_id rcar_pwm_of_table[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index b853e7942605..4381df90a527 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -425,13 +425,12 @@ static int tpu_probe(struct platform_device *pdev)
static int tpu_remove(struct platform_device *pdev)
{
struct tpu_device *tpu = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&tpu->chip);
+ pwmchip_remove(&tpu->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index cbe900877724..f3647b317152 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -384,24 +384,12 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
- /*
- * Disable the PWM clk before unpreparing it if the PWM device is still
- * running. This should only happen when the last PWM user left it
- * enabled, or when nobody requested a PWM that was previously enabled
- * by the bootloader.
- *
- * FIXME: Maybe the core should disable all PWM devices in
- * pwmchip_remove(). In this case we'd only have to call
- * clk_unprepare() after pwmchip_remove().
- *
- */
- if (pwm_is_enabled(pc->chip.pwms))
- clk_disable(pc->clk);
+ pwmchip_remove(&pc->chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static struct platform_driver rockchip_pwm_driver = {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index f6c528f02d43..dd94c4312a0c 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -580,11 +580,8 @@ static int pwm_samsung_probe(struct platform_device *pdev)
static int pwm_samsung_remove(struct platform_device *pdev)
{
struct samsung_pwm_chip *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret < 0)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->base_clk);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 420edc4aa94a..253c4a17d255 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -291,7 +291,7 @@ static int pwm_sifive_remove(struct platform_device *dev)
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
bool is_enabled = false;
struct pwm_device *pwm;
- int ret, ch;
+ int ch;
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
@@ -304,10 +304,10 @@ static int pwm_sifive_remove(struct platform_device *dev)
clk_disable(ddata->clk);
clk_disable_unprepare(ddata->clk);
- ret = pwmchip_remove(&ddata->chip);
+ pwmchip_remove(&ddata->chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
- return ret;
+ return 0;
}
static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 7a69c1a0c060..589aeaaa6ac8 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -231,9 +231,7 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
- platform_set_drvdata(pdev, priv);
-
- ret = pwmchip_add(&priv->pwm_chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->pwm_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
@@ -243,13 +241,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return 0;
}
-static int sl28cpld_pwm_remove(struct platform_device *pdev)
-{
- struct sl28cpld_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->pwm_chip);
-}
-
static const struct of_device_id sl28cpld_pwm_of_match[] = {
{ .compatible = "kontron,sl28cpld-pwm" },
{}
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, sl28cpld_pwm_of_match);
static struct platform_driver sl28cpld_pwm_driver = {
.probe = sl28cpld_pwm_probe,
- .remove = sl28cpld_pwm_remove,
.driver = {
.name = "sl28cpld-pwm",
.of_match_table = sl28cpld_pwm_of_match,
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 93dd03618465..3115abb3f52a 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -209,7 +209,7 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
if (ret < 0)
return ret;
@@ -218,15 +218,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
return 0;
}
-static int stm32_pwm_lp_remove(struct platform_device *pdev)
-{
- struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
-
- pwm_disable(&priv->chip.pwms[0]);
-
- return pwmchip_remove(&priv->chip);
-}
-
static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
{
struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, stm32_pwm_lp_of_match);
static struct platform_driver stm32_pwm_lp_driver = {
.probe = stm32_pwm_lp_probe,
- .remove = stm32_pwm_lp_remove,
.driver = {
.name = "stm32-pwm-lp",
.of_match_table = of_match_ptr(stm32_pwm_lp_of_match),
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index c952604e91f3..91ca67651abd 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -484,11 +484,8 @@ err_bus:
static int sun4i_pwm_remove(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&pwm->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pwm->chip);
clk_disable_unprepare(pwm->bus_clk);
reset_control_assert(pwm->rst);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 35eb19a5a0d1..4701f0c9b921 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -253,7 +253,7 @@ static int ecap_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -267,11 +267,9 @@ static int ecap_pwm_probe(struct platform_device *pdev)
static int ecap_pwm_remove(struct platform_device *pdev)
{
- struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 17909fa53211..5b723a48c5f1 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -485,11 +485,13 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
{
struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ pwmchip_remove(&pc->chip);
+
clk_unprepare(pc->tbclk);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 6c8df5f4e87d..49d9f7a78012 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -276,7 +276,6 @@ static const struct pwm_ops twl6030_pwmled_ops = {
static int twl_pwmled_probe(struct platform_device *pdev)
{
struct twl_pwmled_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -294,20 +293,7 @@ static int twl_pwmled_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwmled_remove(struct platform_device *pdev)
-{
- struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -325,7 +311,6 @@ static struct platform_driver twl_pwmled_driver = {
.of_match_table = of_match_ptr(twl_pwmled_of_match),
},
.probe = twl_pwmled_probe,
- .remove = twl_pwmled_remove,
};
module_platform_driver(twl_pwmled_driver);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index e83a826bf621..203194f2c92e 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -298,7 +298,6 @@ static const struct pwm_ops twl6030_pwm_ops = {
static int twl_pwm_probe(struct platform_device *pdev)
{
struct twl_pwm_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -314,20 +313,7 @@ static int twl_pwm_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwm_remove(struct platform_device *pdev)
-{
- struct twl_pwm_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -345,7 +331,6 @@ static struct platform_driver twl_pwm_driver = {
.of_match_table = of_match_ptr(twl_pwm_of_match),
},
.probe = twl_pwm_probe,
- .remove = twl_pwm_remove,
};
module_platform_driver(twl_pwm_driver);
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index a79bee901e9b..401b1ec90785 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -833,6 +833,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
+ { .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index f1cbc6b2edbb..ebadc6c08e11 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -142,18 +142,6 @@ static const struct wcnss_data pronto_v2_data = {
.num_vregs = 1,
};
-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
- struct qcom_iris *iris,
- bool use_48mhz_xo)
-{
- mutex_lock(&wcnss->iris_lock);
-
- wcnss->iris = iris;
- wcnss->use_48mhz_xo = use_48mhz_xo;
-
- mutex_unlock(&wcnss->iris_lock);
-}
-
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
@@ -639,12 +627,20 @@ static int wcnss_probe(struct platform_device *pdev)
goto detach_pds;
}
+ wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
+ if (IS_ERR(wcnss->iris)) {
+ ret = PTR_ERR(wcnss->iris);
+ goto detach_pds;
+ }
+
ret = rproc_add(rproc);
if (ret)
- goto detach_pds;
+ goto remove_iris;
- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ return 0;
+remove_iris:
+ qcom_iris_remove(wcnss->iris);
detach_pds:
wcnss_release_pds(wcnss);
free_rproc:
@@ -657,7 +653,7 @@ static int wcnss_remove(struct platform_device *pdev)
{
struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
- of_platform_depopulate(&pdev->dev);
+ qcom_iris_remove(wcnss->iris);
rproc_del(wcnss->rproc);
@@ -686,28 +682,7 @@ static struct platform_driver wcnss_driver = {
},
};
-static int __init wcnss_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&wcnss_driver);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&qcom_iris_driver);
- if (ret)
- platform_driver_unregister(&wcnss_driver);
-
- return ret;
-}
-module_init(wcnss_init);
-
-static void __exit wcnss_exit(void)
-{
- platform_driver_unregister(&qcom_iris_driver);
- platform_driver_unregister(&wcnss_driver);
-}
-module_exit(wcnss_exit);
+module_platform_driver(wcnss_driver);
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
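
Dropping the separate iris platform driver lets the hand-rolled wcnss_init()/wcnss_exit() pair collapse into module_platform_driver(), which generates the equivalent boilerplate. Roughly, the macro expands to (a simplified sketch of module_driver() from the platform core):

static int __init wcnss_driver_init(void)
{
	return platform_driver_register(&wcnss_driver);
}
module_init(wcnss_driver_init);

static void __exit wcnss_driver_exit(void)
{
	platform_driver_unregister(&wcnss_driver);
}
module_exit(wcnss_driver_exit);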
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
index 62c8682d0a92..6d01ee6afa7f 100644
--- a/drivers/remoteproc/qcom_wcnss.h
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -17,9 +17,9 @@ struct wcnss_vreg_info {
bool super_turbo;
};
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo);
+void qcom_iris_remove(struct qcom_iris *iris);
int qcom_iris_enable(struct qcom_iris *iris);
void qcom_iris_disable(struct qcom_iris *iris);
-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
-
#endif
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index 169acd305ae3..09720ddddc85 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -17,7 +17,7 @@
#include "qcom_wcnss.h"
struct qcom_iris {
- struct device *dev;
+ struct device dev;
struct clk *xo_clk;
@@ -75,7 +75,7 @@ int qcom_iris_enable(struct qcom_iris *iris)
ret = clk_prepare_enable(iris->xo_clk);
if (ret) {
- dev_err(iris->dev, "failed to enable xo clk\n");
+ dev_err(&iris->dev, "failed to enable xo clk\n");
goto disable_regulators;
}
@@ -93,43 +93,90 @@ void qcom_iris_disable(struct qcom_iris *iris)
regulator_bulk_disable(iris->num_vregs, iris->vregs);
}
-static int qcom_iris_probe(struct platform_device *pdev)
+static const struct of_device_id iris_of_match[] = {
+ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
+ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+ {}
+};
+
+static void qcom_iris_release(struct device *dev)
+{
+ struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
+
+ of_node_put(iris->dev.of_node);
+ kfree(iris);
+}
+
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
{
+ const struct of_device_id *match;
const struct iris_data *data;
- struct qcom_wcnss *wcnss;
+ struct device_node *of_node;
struct qcom_iris *iris;
int ret;
int i;
- iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
- if (!iris)
- return -ENOMEM;
+ of_node = of_get_child_by_name(parent->of_node, "iris");
+ if (!of_node) {
+ dev_err(parent, "No child node \"iris\" found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iris = kzalloc(sizeof(*iris), GFP_KERNEL);
+ if (!iris) {
+ of_node_put(of_node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ device_initialize(&iris->dev);
+ iris->dev.parent = parent;
+ iris->dev.release = qcom_iris_release;
+ iris->dev.of_node = of_node;
+
+ dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
+
+ ret = device_add(&iris->dev);
+ if (ret) {
+ put_device(&iris->dev);
+ return ERR_PTR(ret);
+ }
+
+ match = of_match_device(iris_of_match, &iris->dev);
+ if (!match) {
+ dev_err(&iris->dev, "no matching compatible for iris\n");
+ ret = -EINVAL;
+ goto err_device_del;
+ }
- data = of_device_get_match_data(&pdev->dev);
- wcnss = dev_get_drvdata(pdev->dev.parent);
+ data = match->data;
- iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ iris->xo_clk = devm_clk_get(&iris->dev, "xo");
if (IS_ERR(iris->xo_clk)) {
- if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire xo clk\n");
- return PTR_ERR(iris->xo_clk);
+ ret = PTR_ERR(iris->xo_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&iris->dev, "failed to acquire xo clk\n");
+ goto err_device_del;
}
iris->num_vregs = data->num_vregs;
- iris->vregs = devm_kcalloc(&pdev->dev,
+ iris->vregs = devm_kcalloc(&iris->dev,
iris->num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
- if (!iris->vregs)
- return -ENOMEM;
+ if (!iris->vregs) {
+ ret = -ENOMEM;
+ goto err_device_del;
+ }
for (i = 0; i < iris->num_vregs; i++)
iris->vregs[i].supply = data->vregs[i].name;
- ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
+ ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
if (ret) {
- dev_err(&pdev->dev, "failed to get regulators\n");
- return ret;
+ dev_err(&iris->dev, "failed to get regulators\n");
+ goto err_device_del;
}
for (i = 0; i < iris->num_vregs; i++) {
@@ -143,34 +190,17 @@ static int qcom_iris_probe(struct platform_device *pdev)
data->vregs[i].load_uA);
}
- qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
-
- return 0;
-}
+ *use_48mhz_xo = data->use_48mhz_xo;
-static int qcom_iris_remove(struct platform_device *pdev)
-{
- struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
+ return iris;
- qcom_wcnss_assign_iris(wcnss, NULL, false);
+err_device_del:
+ device_del(&iris->dev);
- return 0;
+ return ERR_PTR(ret);
}
-static const struct of_device_id iris_of_match[] = {
- { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
- { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
- { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
- { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
- {}
-};
-MODULE_DEVICE_TABLE(of, iris_of_match);
-
-struct platform_driver qcom_iris_driver = {
- .probe = qcom_iris_probe,
- .remove = qcom_iris_remove,
- .driver = {
- .name = "qcom-iris",
- .of_match_table = iris_of_match,
- },
-};
+void qcom_iris_remove(struct qcom_iris *iris)
+{
+ device_del(&iris->dev);
+}
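
The iris object now embeds a struct device and follows the raw device-model lifecycle: device_initialize() plus device_add() on creation, device_del() on removal, and a release() callback that frees the memory once the last reference drops. The subtle rule is in the error path: after device_initialize(), the object must be released with put_device(), never kfree(). A condensed sketch with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_create(struct device *parent)
{
	struct my_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	device_initialize(&obj->dev);		/* refcounting is live from here */
	obj->dev.parent = parent;
	obj->dev.release = my_release;
	dev_set_name(&obj->dev, "%s.obj", dev_name(parent));

	if (device_add(&obj->dev)) {
		put_device(&obj->dev);		/* frees through my_release() */
		return ERR_PTR(-ENODEV);
	}

	return obj;
}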
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 7de5905d276a..502b6604b757 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2750,8 +2750,8 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* create a new task to handle the error */
- schedule_work(&rproc->crash_handler);
+ /* Have a worker handle the error; ensure system is not suspended */
+ queue_work(system_freezable_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
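
system_freezable_wq is a global workqueue whose workers are frozen along with user space during suspend, so recovery work queued there cannot race a system-wide power transition. The same idiom in isolation, with a hypothetical work item:

#include <linux/workqueue.h>

static void my_recovery_fn(struct work_struct *work)
{
	/* runs from a freezable worker, never mid-suspend */
}

static DECLARE_WORK(my_recovery_work, my_recovery_fn);

/* report path: queue_work(system_freezable_wq, &my_recovery_work); */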
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
index 26404e68e17a..e6de53a5000c 100644
--- a/drivers/remoteproc/remoteproc_elf_helpers.h
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -15,7 +15,7 @@
* fw_elf_get_class - Get elf class
* @fw: the ELF firmware image
*
- * Note that we use and elf32_hdr to access the class since the start of the
+ * Note that we use elf32_hdr to access the class since the start of the
* struct is the same for both elf classes
*
* Return: elf class of the firmware
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 12153d5801ce..e1bc5214494e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -75,6 +75,15 @@ config RTC_DEBUG
Say yes here to enable debugging support in the RTC framework
and individual RTC drivers.
+config RTC_LIB_KUNIT_TEST
+ tristate "KUnit test for RTC lib functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test RTC library functions.
+
+ If unsure, say N.
+
config RTC_NVMEM
bool "RTC non volatile storage support"
select NVMEM
@@ -624,6 +633,7 @@ config RTC_DRV_FM3130
config RTC_DRV_RX8010
tristate "Epson RX8010SJ"
+ select REGMAP_I2C
help
If you say yes here you get support for the Epson RX8010SJ RTC
chip.
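
The new RTC_LIB_KUNIT_TEST entry follows the usual KUnit conventions: the prompt is hidden when KUNIT_ALL_TESTS is enabled, and the test then defaults on. One plausible way to build and run just this suite is the kunit.py wrapper with a .kunitconfig along these lines (the exact option set, including CONFIG_RTC_CLASS, is an assumption about this tree's dependencies):

CONFIG_KUNIT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_LIB_KUNIT_TEST=y

followed by ./tools/testing/kunit/kunit.py run from the top of the kernel tree.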
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2dd0dd956b0e..5ceeafe4d5b2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,6 +15,8 @@ rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += sysfs.o
+obj-$(CONFIG_RTC_LIB_KUNIT_TEST) += lib_test.o
+
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index 23284580df97..fe361652727a 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -6,6 +6,8 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c and other bits
+ *
+ * Author: Cassio Neri <cassio.neri@gmail.com> (rtc_time64_to_tm)
*/
#include <linux/export.h>
@@ -22,8 +24,6 @@ static const unsigned short rtc_ydays[2][13] = {
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
-#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400)
-
/*
* The number of days in the month.
*/
@@ -42,42 +42,95 @@ int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
}
EXPORT_SYMBOL(rtc_year_days);
-/*
- * rtc_time64_to_tm - Converts time64_t to rtc_time.
- * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
+/**
+ * rtc_time64_to_tm - converts time64_t to rtc_time.
+ *
+ * @time: The number of seconds since 01-01-1970 00:00:00.
+ * (Must be positive.)
+ * @tm: Pointer to the struct rtc_time.
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int month, year, secs;
+ unsigned int secs;
int days;
+ u64 u64tmp;
+ u32 u32tmp, udays, century, day_of_century, year_of_century, year,
+ day_of_year, month, day;
+ bool is_Jan_or_Feb, is_leap_year;
+
/* time must be positive */
days = div_s64_rem(time, 86400, &secs);
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
- year = 1970 + days / 365;
- days -= (year - 1970) * 365
- + LEAPS_THRU_END_OF(year - 1)
- - LEAPS_THRU_END_OF(1970 - 1);
- while (days < 0) {
- year -= 1;
- days += 365 + is_leap_year(year);
- }
- tm->tm_year = year - 1900;
- tm->tm_yday = days + 1;
-
- for (month = 0; month < 11; month++) {
- int newdays;
-
- newdays = days - rtc_month_days(month, year);
- if (newdays < 0)
- break;
- days = newdays;
- }
- tm->tm_mon = month;
- tm->tm_mday = days + 1;
+ /*
+ * The following algorithm is, basically, Proposition 6.3 of Neri
+ * and Schneider [1]. In a few words: it works on the computational
+ * (fictitious) calendar where the year starts in March, month = 2
+ * (*), and finishes in February, month = 13. This calendar is
+ * mathematically convenient because the day of the year does not
+ * depend on whether the year is leap or not. For instance:
+ *
+ * March 1st 0-th day of the year;
+ * ...
+ * April 1st 31-st day of the year;
+ * ...
+ * January 1st 306-th day of the year; (Important!)
+ * ...
+ * February 28th 364-th day of the year;
+ * February 29th 365-th day of the year (if it exists).
+ *
+ * After having worked out the date in the computational calendar
+ * (using just arithmetics) it's easy to convert it to the
+ * corresponding date in the Gregorian calendar.
+ *
+ * [1] "Euclidean Affine Functions and Applications to Calendar
+ * Algorithms". https://arxiv.org/abs/2102.06959
+ *
+ * (*) The numbering of months follows rtc_time more closely and
+ * thus, is slightly different from [1].
+ */
+
+ udays = ((u32) days) + 719468;
+
+ u32tmp = 4 * udays + 3;
+ century = u32tmp / 146097;
+ day_of_century = u32tmp % 146097 / 4;
+
+ u32tmp = 4 * day_of_century + 3;
+ u64tmp = 2939745ULL * u32tmp;
+ year_of_century = upper_32_bits(u64tmp);
+ day_of_year = lower_32_bits(u64tmp) / 2939745 / 4;
+
+ year = 100 * century + year_of_century;
+ is_leap_year = year_of_century != 0 ?
+ year_of_century % 4 == 0 : century % 4 == 0;
+
+ u32tmp = 2141 * day_of_year + 132377;
+ month = u32tmp >> 16;
+ day = ((u16) u32tmp) / 2141;
+
+ /*
+ * Recall that January 01 is the 306-th day of the year in the
+ * computational (not Gregorian) calendar.
+ */
+ is_Jan_or_Feb = day_of_year >= 306;
+
+ /* Converts to the Gregorian calendar. */
+ year = year + is_Jan_or_Feb;
+ month = is_Jan_or_Feb ? month - 12 : month;
+ day = day + 1;
+
+ day_of_year = is_Jan_or_Feb ?
+ day_of_year - 306 : day_of_year + 31 + 28 + is_leap_year;
+
+ /* Converts to rtc_time's format. */
+ tm->tm_year = (int) (year - 1900);
+ tm->tm_mon = (int) month;
+ tm->tm_mday = (int) day;
+ tm->tm_yday = (int) day_of_year + 1;
tm->tm_hour = secs / 3600;
secs -= tm->tm_hour * 3600;
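
To see the fixed-point arithmetic work, it helps to trace time = 0 (days = 0, i.e. 1970-01-01) through the steps above; the intermediate values below were checked by hand against the expressions in the patch. A small userspace replay with upper_32_bits()/lower_32_bits() open-coded:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t udays = 0 + 719468;			/* epoch shifted to 0000-03-01 */
	uint32_t t = 4 * udays + 3;
	uint32_t century = t / 146097;			/* 19 */
	uint32_t day_of_century = t % 146097 / 4;	/* 25508 */
	uint64_t u = 2939745ULL * (4 * day_of_century + 3);
	uint32_t year_of_century = (uint32_t)(u >> 32);	/* 69 */
	uint32_t day_of_year = (uint32_t)u / 2939745 / 4; /* 306 */
	uint32_t year = 100 * century + year_of_century; /* 1969, computational */
	uint32_t m = 2141 * day_of_year + 132377;
	uint32_t month = m >> 16;			/* 12 */
	uint32_t day = (uint16_t)m / 2141;		/* 0 */

	/* day_of_year == 306 is January 1st, so roll into the next Gregorian year */
	assert(day_of_year >= 306);
	assert(year + 1 == 1970 && month - 12 == 0 && day + 1 == 1);
	return 0;
}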
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
new file mode 100644
index 000000000000..d5caf36c56cd
--- /dev/null
+++ b/drivers/rtc/lib_test.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: LGPL-2.1+
+
+#include <kunit/test.h>
+#include <linux/rtc.h>
+
+/*
+ * Advance a date by one day.
+ */
+static void advance_date(int *year, int *month, int *mday, int *yday)
+{
+ if (*mday != rtc_month_days(*month - 1, *year)) {
+ ++*mday;
+ ++*yday;
+ return;
+ }
+
+ *mday = 1;
+ if (*month != 12) {
+ ++*month;
+ ++*yday;
+ return;
+ }
+
+ *month = 1;
+ *yday = 1;
+ ++*year;
+}
+
+/*
+ * Checks every day in a 160000-year interval starting on 1970-01-01
+ * against the expected result.
+ */
+static void rtc_time64_to_tm_test_date_range(struct kunit *test)
+{
+ /*
+ * 160000 years = (160000 / 400) * 400 years
+ * = (160000 / 400) * 146097 days
+ * = (160000 / 400) * 146097 * 86400 seconds
+ */
+ time64_t total_secs = ((time64_t) 160000) / 400 * 146097 * 86400;
+
+ int year = 1970;
+ int month = 1;
+ int mday = 1;
+ int yday = 1;
+
+ struct rtc_time result;
+ time64_t secs;
+ s64 days;
+
+ for (secs = 0; secs <= total_secs; secs += 86400) {
+
+ rtc_time64_to_tm(secs, &result);
+
+ days = div_s64(secs, 86400);
+
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
+ year, month, mday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, mday, result.tm_mday, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG);
+
+ advance_date(&year, &month, &mday, &yday);
+ }
+}
+
+static struct kunit_case rtc_lib_test_cases[] = {
+ KUNIT_CASE(rtc_time64_to_tm_test_date_range),
+ {}
+};
+
+static struct kunit_suite rtc_lib_test_suite = {
+ .name = "rtc_lib_test_cases",
+ .test_cases = rtc_lib_test_cases,
+};
+
+kunit_test_suite(rtc_lib_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 670fd8a2970e..eb15067a605e 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -229,19 +229,13 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
- /* REVISIT: if the clock has a "century" register, use
- * that instead of the heuristic in mc146818_get_time().
- * That'll make Y3K compatility (year > 2070) easy!
- */
mc146818_get_time(t);
return 0;
}
static int cmos_set_time(struct device *dev, struct rtc_time *t)
{
- /* REVISIT: set the "century" register if available
- *
- * NOTE: this ignores the issue whereby updating the seconds
+ /* NOTE: this ignores the issue whereby updating the seconds
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index c914091819ba..d38aaf08108c 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -60,14 +60,23 @@
#define RX8025_ADJ_DATA_MAX 62
#define RX8025_ADJ_DATA_MIN -62
+enum rx_model {
+ model_rx_unknown,
+ model_rx_8025,
+ model_rx_8035,
+ model_last
+};
+
static const struct i2c_device_id rx8025_id[] = {
- { "rx8025", 0 },
+ { "rx8025", model_rx_8025 },
+ { "rx8035", model_rx_8035 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
struct rtc_device *rtc;
+ enum rx_model model;
u8 ctrl1;
};
@@ -100,10 +109,26 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
length, values);
}
+static int rx8025_is_osc_stopped(enum rx_model model, int ctrl2)
+{
+ int xstp = ctrl2 & RX8025_BIT_CTRL2_XST;
+ /* XSTP bit has different polarity on RX-8025 vs RX-8035.
+ * RX-8025: 0 == oscillator stopped
+ * RX-8035: 1 == oscillator stopped
+ */
+
+ if (model == model_rx_8025)
+ xstp = !xstp;
+
+ return xstp;
+}
+
static int rx8025_check_validity(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct rx8025_data *drvdata = dev_get_drvdata(dev);
int ctrl2;
+ int xstp;
ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -117,7 +142,8 @@ static int rx8025_check_validity(struct device *dev)
return -EINVAL;
}
- if (!(ctrl2 & RX8025_BIT_CTRL2_XST)) {
+ xstp = rx8025_is_osc_stopped(drvdata->model, ctrl2);
+ if (xstp) {
dev_warn(dev, "crystal stopped, date is invalid\n");
return -EINVAL;
}
@@ -127,6 +153,7 @@ static int rx8025_check_validity(struct device *dev)
static int rx8025_reset_validity(struct i2c_client *client)
{
+ struct rx8025_data *drvdata = i2c_get_clientdata(client);
int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -134,22 +161,28 @@ static int rx8025_reset_validity(struct i2c_client *client)
ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
+ if (drvdata->model == model_rx_8025)
+ ctrl2 |= RX8025_BIT_CTRL2_XST;
+ else
+ ctrl2 &= ~(RX8025_BIT_CTRL2_XST);
+
return rx8025_write_reg(client, RX8025_REG_CTRL2,
- ctrl2 | RX8025_BIT_CTRL2_XST);
+ ctrl2);
}
static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- int status;
+ int status, xstp;
rtc_lock(rx8025->rtc);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
- if (!(status & RX8025_BIT_CTRL2_XST))
+ xstp = rx8025_is_osc_stopped(rx8025->model, status);
+ if (xstp)
dev_warn(&client->dev, "Oscillation stop was detected,"
"you may have to readjust the clock\n");
@@ -519,6 +552,9 @@ static int rx8025_probe(struct i2c_client *client,
i2c_set_clientdata(client, rx8025);
+ if (id)
+ rx8025->model = id->driver_data;
+
err = rx8025_init_client(client);
if (err)
return err;
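
Since rx8025_is_osc_stopped() encodes the polarity difference, a tiny standalone check makes the truth table explicit. This is a sketch: the mask value and the boolean normalisation are assumptions for illustration, not the driver's exact definitions:

#include <assert.h>

#define XST_BIT 0x20	/* assumed stand-in for RX8025_BIT_CTRL2_XST */

enum rx_model { model_rx_8025, model_rx_8035 };

static int osc_stopped(enum rx_model model, int ctrl2)
{
	int xstp = ctrl2 & XST_BIT;

	return model == model_rx_8025 ? !xstp : !!xstp;
}

int main(void)
{
	assert(osc_stopped(model_rx_8025, 0));		/* RX-8025: 0 == stopped */
	assert(!osc_stopped(model_rx_8025, XST_BIT));
	assert(osc_stopped(model_rx_8035, XST_BIT));	/* RX-8035: 1 == stopped */
	assert(!osc_stopped(model_rx_8035, 0));
	return 0;
}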
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 6b56f8eacba6..fb9c6b709e13 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -204,15 +204,9 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_WEEKDAY] = 1 << tm->tm_wday;
data[RTC_DATE] = tm->tm_mday;
data[RTC_MONTH] = tm->tm_mon + 1;
- data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+ data[RTC_YEAR1] = tm->tm_year - 100;
- if (tm->tm_year < 100) {
- pr_err("RTC cannot handle the year %d\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -786,29 +780,35 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- device_init_wakeup(&pdev->dev, 1);
-
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
- &s5m_rtc_ops, THIS_MODULE);
-
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
- if (!info->irq) {
- dev_info(&pdev->dev, "Alarm IRQ not available\n");
- return 0;
+ info->rtc_dev->ops = &s5m_rtc_ops;
+
+ if (info->device_type == S5M8763X) {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_9999;
+ } else {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
}
- ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
- s5m_rtc_alarm_irq, 0, "rtc-alarm0",
- info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->irq, ret);
- return ret;
+ if (!info->irq) {
+ clear_bit(RTC_FEATURE_ALARM, info->rtc_dev->features);
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ s5m_rtc_alarm_irq, 0, "rtc-alarm0",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->irq, ret);
+ return ret;
+ }
+ device_init_wakeup(&pdev->dev, 1);
}
- return 0;
+ return devm_rtc_register_device(info->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
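
The s5m conversion moves from the one-shot devm_rtc_device_register() to the two-step allocate/register flow, which is what allows ops, range limits and feature bits to be set before the device goes live. The general shape, sketched with hypothetical names and an elided ops table:

#include <linux/platform_device.h>
#include <linux/rtc.h>

static const struct rtc_class_ops my_rtc_ops;	/* read/set callbacks elided */

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	/* advertise only what the hardware can actually do */
	clear_bit(RTC_FEATURE_ALARM, rtc->features);

	return devm_rtc_register_device(rtc);
}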
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index bc89c62ccb9b..75e4c2d777b9 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -467,6 +467,6 @@ static struct platform_driver tps65910_rtc_driver = {
};
module_platform_driver(tps65910_rtc_driver);
-MODULE_ALIAS("platform:rtc-tps65910");
+MODULE_ALIAS("platform:tps65910-rtc");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 376f1efbbb86..d0416dbd0cd8 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,17 +2,6 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
-config BLK_DEV_XPRAM
- def_tristate m
- prompt "XPRAM disk support"
- depends on S390 && BLOCK
- help
- Select this option if you want to use your expanded storage on S/390
- or zSeries as a disk. This is useful as a _fast_ swap device if you
- want to access more than 2G of memory when running in 31 bit mode.
- This option is also available as a module which will be called
- xpram. If unsure, say "N".
-
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 60c85cff556f..a0a54d2f063f 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
-obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
deleted file mode 100644
index ce98fab4d43c..000000000000
--- a/drivers/s390/block/xpram.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xpram.c -- the S/390 expanded memory RAM-disk
- *
- * significant parts of this code are based on
- * the sbull device driver presented in
- * A. Rubini: Linux Device Drivers
- *
- * Author of XPRAM specific coding: Reinhard Buendgen
- * buendgen@de.ibm.com
- * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * External interfaces:
- * Interfaces to linux kernel
- * xpram_setup: read kernel parameters
- * Device specific file operations
- * xpram_iotcl
- * xpram_open
- *
- * "ad-hoc" partitioning:
- * the expanded memory can be partitioned among several devices
- * (with different minors). The partitioning set up can be
- * set by kernel or module parameters (int devs & int sizes[])
- *
- * Potential future improvements:
- * generic hard disk support to replace ad-hoc partitioning
- */
-
-#define KMSG_COMPONENT "xpram"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/ctype.h> /* isdigit, isxdigit */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/device.h>
-#include <linux/bio.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-
-#define XPRAM_NAME "xpram"
-#define XPRAM_DEVS 1 /* one partition */
-#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
-
-typedef struct {
- unsigned int size; /* size of xpram segment in pages */
- unsigned int offset; /* start page of xpram segment */
-} xpram_device_t;
-
-static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
-static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
-static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
-static unsigned int xpram_pages;
-static int xpram_devs;
-
-/*
- * Parameter parsing functions.
- */
-static int devs = XPRAM_DEVS;
-static char *sizes[XPRAM_MAX_DEVS];
-
-module_param(devs, int, 0);
-module_param_array(sizes, charp, NULL, 0);
-
-MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
- "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
-MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
- "the defaults are 0s \n" \
- "All devices with size 0 equally partition the "
- "remaining space on the expanded strorage not "
- "claimed by explicit sizes\n");
-MODULE_LICENSE("GPL");
-
-/*
- * Copy expanded memory page (4kB) into main memory
- * Arguments
- * page_addr: address of target page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgin failed
- * -ENXIO: if xpram has vanished
- */
-static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Copy a 4kB page of main memory to an expanded memory page
- * Arguments
- * page_addr: address of source page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgout failed
- * -ENXIO: if xpram has vanished
- */
-static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Check if xpram is available.
- */
-static int __init xpram_present(void)
-{
- unsigned long mem_page;
- int rc;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- rc = xpram_page_in(mem_page, 0);
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
- * Return index of the last available xpram page.
- */
-static unsigned long __init xpram_highest_page_index(void)
-{
- unsigned int page_index, add_bit;
- unsigned long mem_page;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return 0;
-
- page_index = 0;
- add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
- while (add_bit > 0) {
- if (xpram_page_in(mem_page, page_index | add_bit) == 0)
- page_index |= add_bit;
- add_bit >>= 1;
- }
-
- free_page (mem_page);
-
- return page_index;
-}
-
-/*
- * Block device make request function.
- */
-static blk_qc_t xpram_submit_bio(struct bio *bio)
-{
- xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec bvec;
- struct bvec_iter iter;
- unsigned int index;
- unsigned long page_addr;
- unsigned long bytes;
-
- blk_queue_split(&bio);
-
- if ((bio->bi_iter.bi_sector & 7) != 0 ||
- (bio->bi_iter.bi_size & 4095) != 0)
- /* Request is not page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_size >> 12) > xdev->size)
- /* Request size is no page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
- goto fail;
- index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, iter) {
- page_addr = (unsigned long)
- kmap(bvec.bv_page) + bvec.bv_offset;
- bytes = bvec.bv_len;
- if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
- /* More paranoia. */
- goto fail;
- while (bytes > 0) {
- if (bio_data_dir(bio) == READ) {
- if (xpram_page_in(page_addr, index) != 0)
- goto fail;
- } else {
- if (xpram_page_out(page_addr, index) != 0)
- goto fail;
- }
- page_addr += 4096;
- bytes -= 4096;
- index++;
- }
- }
- bio_endio(bio);
- return BLK_QC_T_NONE;
-fail:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
-}
-
-static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- unsigned long size;
-
- /*
- * get geometry: we have to fake one... trim the size to a
- * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
- * whatever cylinders. Tell also that data starts at sector. 4.
- */
- size = (xpram_pages * 8) & ~0x3f;
- geo->cylinders = size >> 6;
- geo->heads = 4;
- geo->sectors = 16;
- geo->start = 4;
- return 0;
-}
-
-static const struct block_device_operations xpram_devops =
-{
- .owner = THIS_MODULE,
- .submit_bio = xpram_submit_bio,
- .getgeo = xpram_getgeo,
-};
-
-/*
- * Setup xpram_sizes array.
- */
-static int __init xpram_setup_sizes(unsigned long pages)
-{
- unsigned long mem_needed;
- unsigned long mem_auto;
- unsigned long long size;
- char *sizes_end;
- int mem_auto_no;
- int i;
-
- /* Check number of devices. */
- if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
- pr_err("%d is not a valid number of XPRAM devices\n",devs);
- return -EINVAL;
- }
- xpram_devs = devs;
-
- /*
- * Copy sizes array to xpram_sizes and align partition
- * sizes to page boundary.
- */
- mem_needed = 0;
- mem_auto_no = 0;
- for (i = 0; i < xpram_devs; i++) {
- if (sizes[i]) {
- size = simple_strtoull(sizes[i], &sizes_end, 0);
- switch (*sizes_end) {
- case 'g':
- case 'G':
- size <<= 20;
- break;
- case 'm':
- case 'M':
- size <<= 10;
- }
- xpram_sizes[i] = (size + 3) & -4UL;
- }
- if (xpram_sizes[i])
- mem_needed += xpram_sizes[i];
- else
- mem_auto_no++;
- }
-
- pr_info(" number of devices (partitions): %d \n", xpram_devs);
- for (i = 0; i < xpram_devs; i++) {
- if (xpram_sizes[i])
- pr_info(" size of partition %d: %u kB\n",
- i, xpram_sizes[i]);
- else
- pr_info(" size of partition %d to be set "
- "automatically\n",i);
- }
- pr_info(" memory needed (for sized partitions): %lu kB\n",
- mem_needed);
- pr_info(" partitions to be sized automatically: %d\n",
- mem_auto_no);
-
- if (mem_needed > pages * 4) {
- pr_err("Not enough expanded memory available\n");
- return -EINVAL;
- }
-
- /*
- * partitioning:
- * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
- * else: ; all partitions with zero xpram_sizes[i]
- * partition equally the remaining space
- */
- if (mem_auto_no) {
- mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
- pr_info(" automatically determined "
- "partition size: %lu kB\n", mem_auto);
- for (i = 0; i < xpram_devs; i++)
- if (xpram_sizes[i] == 0)
- xpram_sizes[i] = mem_auto;
- }
- return 0;
-}
-
-static int __init xpram_setup_blkdev(void)
-{
- unsigned long offset;
- int i, rc = -ENOMEM;
-
- for (i = 0; i < xpram_devs; i++) {
- xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE);
- if (!xpram_disks[i])
- goto out;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM,
- xpram_disks[i]->queue);
- blk_queue_logical_block_size(xpram_disks[i]->queue, 4096);
- }
-
- /*
- * Register xpram major.
- */
- rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
- if (rc < 0)
- goto out;
-
- /*
- * Setup device structures.
- */
- offset = 0;
- for (i = 0; i < xpram_devs; i++) {
- struct gendisk *disk = xpram_disks[i];
-
- xpram_devices[i].size = xpram_sizes[i] / 4;
- xpram_devices[i].offset = offset;
- offset += xpram_devices[i].size;
- disk->major = XPRAM_MAJOR;
- disk->first_minor = i;
- disk->minors = 1;
- disk->fops = &xpram_devops;
- disk->private_data = &xpram_devices[i];
- sprintf(disk->disk_name, "slram%d", i);
- set_capacity(disk, xpram_sizes[i] << 1);
- add_disk(disk);
- }
-
- return 0;
-out:
- while (i--)
- blk_cleanup_disk(xpram_disks[i]);
- return rc;
-}
-
-/*
- * Finally, the init/exit functions.
- */
-static void __exit xpram_exit(void)
-{
- int i;
- for (i = 0; i < xpram_devs; i++) {
- del_gendisk(xpram_disks[i]);
- blk_cleanup_disk(xpram_disks[i]);
- }
- unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
-}
-
-static int __init xpram_init(void)
-{
- int rc;
-
- /* Find out size of expanded memory. */
- if (xpram_present() != 0) {
- pr_err("No expanded memory available\n");
- return -ENODEV;
- }
- xpram_pages = xpram_highest_page_index() + 1;
- pr_info(" %u pages expanded memory found (%lu KB).\n",
- xpram_pages, (unsigned long) xpram_pages*4);
- rc = xpram_setup_sizes(xpram_pages);
- if (rc)
- return rc;
- return xpram_setup_blkdev();
-}
-
-module_init(xpram_init);
-module_exit(xpram_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 87cdbace1453..e4592890f20a 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -292,13 +292,15 @@ con3270_update(struct timer_list *t)
* Read tasklet.
*/
static void
-con3270_read_tasklet(struct raw3270_request *rrq)
+con3270_read_tasklet(unsigned long data)
{
static char kreset_data = TW_KR;
+ struct raw3270_request *rrq;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
+ rrq = (struct raw3270_request *)data;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
@@ -625,8 +627,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
timer_setup(&condev->timer, con3270_update, 0);
- tasklet_init(&condev->readlet,
- (void (*)(unsigned long)) con3270_read_tasklet,
+ tasklet_init(&condev->readlet, con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index e1686a69a68e..6f2b64040078 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -34,12 +34,13 @@ void schedule_sysrq_work(struct sysrq_work *sw)
/**
- * Check for special chars at start of input.
+ * ctrlchar_handle - check for special chars at start of input
*
- * @param buf Console input buffer.
- * @param len Length of valid data in buffer.
- * @param tty The tty struct for this console.
- * @return CTRLCHAR_NONE, if nothing matched,
+ * @buf: console input buffer
+ * @len: length of valid data in buffer
+ * @tty: the tty struct for this console
+ *
+ * Return: CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 37ee8f698c3b..02b6f394aec2 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -26,7 +26,7 @@
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
- * @cmd: FTP transfer function
+ * @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b4b84e3e0949..2cf7fe131ece 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -28,7 +28,7 @@
#define SCLP_HEADER "sclp: "
struct sclp_trace_entry {
- char id[4];
+ char id[4] __nonstring;
u32 a;
u64 b;
};
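
__nonstring marks id[] as a fixed-width byte tag rather than a NUL-terminated string, which silences -Wstringop-truncation style warnings when exactly sizeof(id) bytes are copied in. A minimal illustration (the "SCLP" tag is made up):

#include <linux/string.h>

struct tag {
	char id[4] __nonstring;	/* deliberately not NUL-terminated */
};

static void set_tag(struct tag *t)
{
	memcpy(t->id, "SCLP", 4);	/* fills all four bytes, no terminator */
}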
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 4dd2eb634856..f3c656975e05 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,7 +262,10 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- css_schedule_eval_all_unreg(0);
+ /* There could be subchannels without proper devices connected;
+ * evaluate all the entries.
+ */
+ css_schedule_eval_all();
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index adf33b653d87..8d14569823d7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -867,19 +867,6 @@ out_err:
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- /* Get subchannel reference for local processing. */
- if (!get_device(cdev->dev.parent))
- return;
- sch = to_subchannel(cdev->dev.parent);
- css_sch_device_unregister(sch);
- /* Release subchannel reference for local processing. */
- put_device(&sch->dev);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1857,10 +1844,10 @@ static void ccw_device_todo(struct work_struct *work)
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
- if (sch_is_pseudo_sch(sch))
- ccw_device_unregister(cdev);
- else
- ccw_device_call_sch_unregister(cdev);
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ ccw_device_unregister(cdev);
break;
default:
break;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 740996d0dc8c..7835a87a60b5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -91,7 +91,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
}
/**
- * diag_get_dev_info - retrieve device information via diag 0x210
+ * diag210_get_dev_info - retrieve device information via diag 0x210
* @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index c57d2a7f0919..7f540ad0b568 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -159,7 +159,7 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
return 0;
}
-static int vfio_ccw_mdev_open(struct mdev_device *mdev)
+static int vfio_ccw_mdev_open_device(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
@@ -194,7 +194,7 @@ out_unregister:
return ret;
}
-static void vfio_ccw_mdev_release(struct mdev_device *mdev)
+static void vfio_ccw_mdev_close_device(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
@@ -638,8 +638,8 @@ static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.supported_type_groups = mdev_type_groups,
.create = vfio_ccw_mdev_create,
.remove = vfio_ccw_mdev_remove,
- .open = vfio_ccw_mdev_open,
- .release = vfio_ccw_mdev_release,
+ .open_device = vfio_ccw_mdev_open_device,
+ .close_device = vfio_ccw_mdev_close_device,
.read = vfio_ccw_mdev_read,
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 67f145589f58..118939a7729a 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,8 +24,9 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static int match_apqn(struct device *dev, const void *data)
{
@@ -295,15 +296,6 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
struct ap_matrix_mdev, pqap_hook);
- /*
- * If the KVM pointer is in the process of being set, wait until the
- * process has completed.
- */
- wait_event_cmd(matrix_mdev->wait_for_kvm,
- !matrix_mdev->kvm_busy,
- mutex_unlock(&matrix_dev->lock),
- mutex_lock(&matrix_dev->lock));
-
/* If there is no guest using the mdev, there is nothing to do */
if (!matrix_mdev->kvm)
goto out_unlock;
@@ -336,45 +328,57 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->Nd : 15;
}
-static int vfio_ap_mdev_create(struct mdev_device *mdev)
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
+ int ret;
if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
return -EPERM;
matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
if (!matrix_mdev) {
- atomic_inc(&matrix_dev->available_instances);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_dec_available;
}
+ vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
+ &vfio_ap_matrix_dev_ops);
matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
- init_waitqueue_head(&matrix_mdev->wait_for_kvm);
- mdev_set_drvdata(mdev, matrix_mdev);
- matrix_mdev->pqap_hook.hook = handle_pqap;
- matrix_mdev->pqap_hook.owner = THIS_MODULE;
+ matrix_mdev->pqap_hook = handle_pqap;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
+ ret = vfio_register_group_dev(&matrix_mdev->vdev);
+ if (ret)
+ goto err_list;
+ dev_set_drvdata(&mdev->dev, matrix_mdev);
return 0;
+
+err_list:
+ mutex_lock(&matrix_dev->lock);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+ kfree(matrix_mdev);
+err_dec_available:
+ atomic_inc(&matrix_dev->available_instances);
+ return ret;
}
-static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
+
+ vfio_unregister_group_dev(&matrix_mdev->vdev);
mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(mdev);
+ vfio_ap_mdev_reset_queues(matrix_mdev);
list_del(&matrix_mdev->node);
kfree(matrix_mdev);
- mdev_set_drvdata(mdev, NULL);
atomic_inc(&matrix_dev->available_instances);
mutex_unlock(&matrix_dev->lock);
-
- return 0;
}
static ssize_t name_show(struct mdev_type *mtype,
@@ -614,16 +618,12 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of adapter
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If the KVM guest is running, disallow assignment of adapter */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -685,16 +685,12 @@ static ssize_t unassign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of adapter
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If the KVM guest is running, disallow unassignment of adapter */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -773,17 +769,13 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * assignment of domain
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If the KVM guest is running, disallow assignment of domain */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -840,16 +832,12 @@ static ssize_t unassign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of domain
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If the KVM guest is running, disallow unassignment of domain */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -893,16 +881,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
{
int ret;
unsigned long id;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * assignment of control domain.
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If the KVM guest is running, disallow assignment of control domain */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -949,17 +933,13 @@ static ssize_t unassign_control_domain_store(struct device *dev,
{
int ret;
unsigned long domid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
mutex_lock(&matrix_dev->lock);
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of control domain.
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+ /* If a KVM guest is running, disallow unassignment of control domain */
+ if (matrix_mdev->kvm) {
ret = -EBUSY;
goto done;
}
@@ -988,8 +968,7 @@ static ssize_t control_domains_show(struct device *dev,
int nchars = 0;
int n;
char *bufpos = buf;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
mutex_lock(&matrix_dev->lock);
@@ -1007,8 +986,7 @@ static DEVICE_ATTR_RO(control_domains);
static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
@@ -1098,23 +1076,30 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
struct ap_matrix_mdev *m;
if (kvm->arch.crypto.crycbd) {
+ down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+ up_write(&kvm->arch.crypto.pqap_hook_rwsem);
+
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->lock);
+
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if (m != matrix_mdev && m->kvm == kvm)
+ if (m != matrix_mdev && m->kvm == kvm) {
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->lock);
return -EPERM;
+ }
}
kvm_get_kvm(kvm);
- matrix_mdev->kvm_busy = true;
- mutex_unlock(&matrix_dev->lock);
+ matrix_mdev->kvm = kvm;
kvm_arch_crypto_set_masks(kvm,
matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
- mutex_lock(&matrix_dev->lock);
- kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- matrix_mdev->kvm = kvm;
- matrix_mdev->kvm_busy = false;
- wake_up_all(&matrix_mdev->wait_for_kvm);
+
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->lock);
}
return 0;
@@ -1163,28 +1148,24 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
* certain circumstances, will result in a circular lock dependency if this is
* done under the @matrix_mdev->lock.
*/
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
+ struct kvm *kvm)
{
- /*
- * If the KVM pointer is in the process of being set, wait until the
- * process has completed.
- */
- wait_event_cmd(matrix_mdev->wait_for_kvm,
- !matrix_mdev->kvm_busy,
- mutex_unlock(&matrix_dev->lock),
- mutex_lock(&matrix_dev->lock));
+ if (kvm && kvm->arch.crypto.crycbd) {
+ down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+ kvm->arch.crypto.pqap_hook = NULL;
+ up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- if (matrix_mdev->kvm) {
- matrix_mdev->kvm_busy = true;
- mutex_unlock(&matrix_dev->lock);
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ mutex_lock(&kvm->lock);
mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- kvm_put_kvm(matrix_mdev->kvm);
+
+ kvm_arch_crypto_clear_masks(kvm);
+ vfio_ap_mdev_reset_queues(matrix_mdev);
+ kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- matrix_mdev->kvm_busy = false;
- wake_up_all(&matrix_mdev->wait_for_kvm);
+
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->lock);
}
}
@@ -1197,16 +1178,13 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
- mutex_lock(&matrix_dev->lock);
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
if (!data)
- vfio_ap_mdev_unset_kvm(matrix_mdev);
+ vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
notify_rc = NOTIFY_DONE;
- mutex_unlock(&matrix_dev->lock);
-
return notify_rc;
}
@@ -1276,13 +1254,12 @@ free_resources:
return ret;
}
-static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
int ret;
int rc = 0;
unsigned long apid, apqi;
struct vfio_ap_queue *q;
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
matrix_mdev->matrix.apm_max + 1) {
@@ -1303,52 +1280,45 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
return rc;
}
-static int vfio_ap_mdev_open(struct mdev_device *mdev)
+static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
unsigned long events;
int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
&events, &matrix_mdev->group_notifier);
- if (ret) {
- module_put(THIS_MODULE);
+ if (ret)
return ret;
- }
matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
&events, &matrix_mdev->iommu_notifier);
- if (!ret)
- return ret;
+ if (ret)
+ goto out_unregister_group;
+ return 0;
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+out_unregister_group:
+ vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- module_put(THIS_MODULE);
return ret;
}
-static void vfio_ap_mdev_release(struct mdev_device *mdev)
+static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
-
- mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- mutex_unlock(&matrix_dev->lock);
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
&matrix_mdev->iommu_notifier);
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- module_put(THIS_MODULE);
+ vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -1371,11 +1341,12 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
-static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd, unsigned long arg)
{
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
- struct ap_matrix_mdev *matrix_mdev;
mutex_lock(&matrix_dev->lock);
switch (cmd) {
@@ -1383,22 +1354,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- matrix_mdev = mdev_get_drvdata(mdev);
- if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
- ret = -EINVAL;
- break;
- }
-
- /*
- * If the KVM pointer is in the process of being set, wait until
- * the process has completed.
- */
- wait_event_cmd(matrix_mdev->wait_for_kvm,
- !matrix_mdev->kvm_busy,
- mutex_unlock(&matrix_dev->lock),
- mutex_lock(&matrix_dev->lock));
-
- ret = vfio_ap_mdev_reset_queues(mdev);
+ ret = vfio_ap_mdev_reset_queues(matrix_mdev);
break;
default:
ret = -EOPNOTSUPP;
@@ -1409,25 +1365,51 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
return ret;
}
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+ .open_device = vfio_ap_mdev_open_device,
+ .close_device = vfio_ap_mdev_close_device,
+ .ioctl = vfio_ap_mdev_ioctl,
+};
+
+static struct mdev_driver vfio_ap_matrix_driver = {
+ .driver = {
+ .name = "vfio_ap_mdev",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = vfio_ap_mdev_attr_groups,
+ },
+ .probe = vfio_ap_mdev_probe,
+ .remove = vfio_ap_mdev_remove,
+};
+
static const struct mdev_parent_ops vfio_ap_matrix_ops = {
.owner = THIS_MODULE,
+ .device_driver = &vfio_ap_matrix_driver,
.supported_type_groups = vfio_ap_mdev_type_groups,
- .mdev_attr_groups = vfio_ap_mdev_attr_groups,
- .create = vfio_ap_mdev_create,
- .remove = vfio_ap_mdev_remove,
- .open = vfio_ap_mdev_open,
- .release = vfio_ap_mdev_release,
- .ioctl = vfio_ap_mdev_ioctl,
};
int vfio_ap_mdev_register(void)
{
+ int ret;
+
atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
- return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+ ret = mdev_register_driver(&vfio_ap_matrix_driver);
+ if (ret)
+ return ret;
+
+ ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+ if (ret)
+ goto err_driver;
+ return 0;
+
+err_driver:
+ mdev_unregister_driver(&vfio_ap_matrix_driver);
+ return ret;
}
void vfio_ap_mdev_unregister(void)
{
mdev_unregister_device(&matrix_dev->device);
+ mdev_unregister_driver(&vfio_ap_matrix_driver);
}
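
Registration order matters in the new scheme: the mdev driver must exist before the parent device is registered, and teardown mirrors it in reverse. The companion change is the embedded vfio_device, initialised in probe and recovered with container_of() in the ops. A condensed sketch of that pattern, with hypothetical names and the ops table elided:

#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/vfio.h>

static const struct vfio_device_ops my_dev_ops;	/* open/close/ioctl elided */

struct my_state {
	struct vfio_device vdev;
};

static int my_probe(struct mdev_device *mdev)
{
	struct my_state *st;
	int ret;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	vfio_init_group_dev(&st->vdev, &mdev->dev, &my_dev_ops);
	ret = vfio_register_group_dev(&st->vdev);
	if (ret) {
		kfree(st);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, st);
	return 0;
}

static void my_remove(struct mdev_device *mdev)
{
	struct my_state *st = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&st->vdev);	/* waits for open users */
	kfree(st);
}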
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index f82a6396acae..77760e2b546f 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
+#include <linux/vfio.h>
#include "ap_bus.h"
@@ -79,14 +80,13 @@ struct ap_matrix {
* @kvm: the struct holding guest's state
*/
struct ap_matrix_mdev {
+ struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
struct notifier_block iommu_notifier;
- bool kvm_busy;
- wait_queue_head_t wait_for_kvm;
struct kvm *kvm;
- struct kvm_s390_module_hook pqap_hook;
+ crypto_hook pqap_hook;
struct mdev_device *mdev;
};
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index fa0cb8633040..356318746dd1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -71,7 +71,7 @@ static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
-/**
+/*
* Process a rescan of the transport layer.
*
* Returns 1, if the rescan has been processed, otherwise 0.
@@ -462,7 +462,7 @@ static void zcdn_destroy_all(void)
#endif
-/**
+/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
@@ -473,7 +473,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_write(): Not allowed.
*
* Write is not allowed
@@ -484,7 +484,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
@@ -512,7 +512,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
return stream_open(inode, filp);
}
-/**
+/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
@@ -2153,7 +2153,7 @@ static void zcdn_exit(void)
#endif
-/**
+/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
@@ -2191,7 +2191,7 @@ out:
return rc;
}
-/**
+/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index fa8293d37006..2bd49950ba81 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -65,7 +65,7 @@ static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
-/**
+/*
* Probe function for CEX2A card devices. It always accepts the AP device
* since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
@@ -124,7 +124,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A card driver information
* if an AP card device is removed.
*/
@@ -142,7 +142,7 @@ static struct ap_driver zcrypt_cex2a_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2A queue devices. It always accepts the AP device
* since the bus_match already checked the queue type.
* @ap_dev: pointer to the AP device.
@@ -183,7 +183,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index a0b9f1153e12..6360fdd06160 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -171,7 +171,7 @@ static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
-/**
+/*
* Large random number detection function. It sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
@@ -237,7 +237,7 @@ out_free:
return rc;
}
-/**
+/*
* Probe function for CEX2C/CEX3C card devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -303,7 +303,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C card driver information
* if an AP card device is removed.
*/
@@ -325,7 +325,7 @@ static struct ap_driver zcrypt_cex2c_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2C/CEX3C queue devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -376,7 +376,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 1f7ec54142e1..06024bbe9a58 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -394,7 +394,7 @@ static const struct attribute_group ep11_queue_attr_grp = {
.attrs = ep11_queue_attrs,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -562,7 +562,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
* information if an AP card device is removed.
*/
@@ -586,7 +586,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -652,7 +652,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 99405472824d..99937f3e1d49 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* The type 50 message family is associated with CEXxA cards.
*
* The four members of the family are described below.
@@ -136,7 +136,7 @@ struct type50_crb3_msg {
unsigned char message[512];
} __packed;
-/**
+/*
* The type 80 response family is associated with CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
@@ -188,7 +188,7 @@ unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
return 0;
}
-/**
+/*
* Convert an ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
@@ -255,7 +255,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert an ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
@@ -346,7 +346,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
@@ -418,7 +418,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -457,7 +457,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -502,7 +502,7 @@ out:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -547,7 +547,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 752c6398fcd6..bc5a8c31ba73 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* CPRB
* Note that all shorts, ints and longs are little-endian.
* All pointer fields are 32-bits long, and mean nothing
@@ -107,7 +107,7 @@ struct function_and_rules_block {
unsigned char only_rule[8];
} __packed;
-/**
+/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
@@ -236,7 +236,7 @@ int speed_idx_ep11(int req_type)
}
-/**
+/*
* Convert an ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
@@ -305,7 +305,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert an ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
@@ -374,7 +374,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert an XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
@@ -571,7 +571,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
return 0;
}
-/**
+/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
@@ -697,7 +697,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -728,7 +728,7 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -911,7 +911,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -966,7 +966,7 @@ out:
complete(&(resp_type->work));
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -1015,7 +1015,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1063,7 +1063,7 @@ out_free:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1112,7 +1112,7 @@ out_free:
return rc;
}
-/**
+/*
* Fetch function code from cprb.
* Extracting the fc requires copying the cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1140,7 +1140,7 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1170,7 +1170,7 @@ out:
return rc;
}
-/**
+/*
* Fetch function code from ep11 cprb.
* Extracting the fc requires copying the ep11 cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1198,7 +1198,7 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
}
-/**
+/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1228,7 +1228,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
} __packed * payload_hdr = NULL;
- /**
+ /*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
@@ -1299,7 +1299,7 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
return 0;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1339,7 +1339,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index ca473b368905..cbc3b62cd9e5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -766,7 +766,7 @@ static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
}
/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
@@ -824,7 +824,7 @@ err_out:
}
/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 2e4804ef2fb9..c1f979296c1a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2275,7 +2275,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
}
/**
- * zfcp_fsf_close_LUN - close LUN
+ * zfcp_fsf_close_lun - close LUN
* @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/
@@ -2377,7 +2377,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
}
}
- blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
+ blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
}
/**
@@ -2599,8 +2599,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
io->fcp_cmnd_length = FCP_CMND_LEN;
if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
- io->data_block_length = scsi_cmnd->device->sector_size;
- io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+ io->data_block_length = scsi_prot_interval(scsi_cmnd);
+ io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
}
if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8f19bed6384e..6a2720105138 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -384,7 +384,7 @@ free_req_q:
}
/**
- * zfcp_close_qdio - close qdio queues for an adapter
+ * zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 59333f0257a8..60f2a04f0869 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -111,9 +111,9 @@ static void zfcp_unit_release(struct device *dev)
}
/**
- * zfcp_unit_enqueue - enqueue unit to unit list of a port.
+ * zfcp_unit_add - add unit to unit list of a port.
* @port: pointer to port where unit is added
- * @fcp_lun: FCP LUN of unit to be enqueued
+ * @fcp_lun: FCP LUN of unit to be added
* Returns: 0 success
*
* Sets up some unit internal structures and creates sysfs entry.
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 1c6b4e672687..a12e3525977d 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1823,7 +1823,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
SCp->device->simple_tags) {
- slot->tag = SCp->request->tag;
+ slot->tag = scsi_cmd_to_rq(SCp)->tag;
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
} else {
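The scsi_cmd_to_rq() conversions that run through the rest of this series replace direct cmd->request dereferences with an accessor, so drivers no longer depend on struct scsi_cmnd keeping a request back-pointer. In this kernel the accessor is a thin inline built on the blk-mq rule that the scsi_cmnd is allocated as its request's per-driver payload; paraphrased:

static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	/* the command is the blk-mq payload (pdu) of its request */
	return blk_mq_rq_from_pdu(scmd);
}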
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index adddcd589941..40088dcb98cd 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
blogic_info(" DMA Channel: None, ", adapter);
if (adapter->bios_addr > 0)
- blogic_info("BIOS Address: 0x%lX, ", adapter,
+ blogic_info("BIOS Address: 0x%X, ", adapter,
adapter->bios_addr);
else
blogic_info("BIOS Address: None, ", adapter);
@@ -3436,7 +3436,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
int len = 0;
va_start(args, adapter);
- len = vsprintf(buf, fmt, args);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
static int msglines = 0;
@@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
if (buf[0] != '\n' || len > 1)
printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
} else {
if (begin) {
if (adapter != NULL && adapter->adapter_initd)
@@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
else
printk("%s%s", blogic_msglevelmap[msglevel], buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
}
begin = (buf[len - 1] == '\n');
}
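Two distinct fixes above: vsprintf() can overrun the fixed-size message buffer, while vscnprintf() bounds the write to the buffer size and returns the number of characters actually stored; and a bare printk("%s", ...) starts a new default-level record, whereas pr_cont() appends to the line begun by the previous printk. A minimal sketch combining both (hypothetical helper):

static void demo_msg(const char *fmt, ...)
{
	char buf[192];
	va_list args;
	int len;

	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);	/* never overruns buf */
	va_end(args);

	pr_cont("%.*s", len, buf);	/* continue the current log line */
}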
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8f44d433e06e..6e3a04107bb6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -14,12 +14,16 @@ config RAID_ATTRS
help
Provides RAID
+config SCSI_COMMON
+ tristate
+
config SCSI
tristate "SCSI device support"
depends on BLOCK
select SCSI_DMA if HAS_DMA
select SG_POOL
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
+ select BLK_DEV_BSG_COMMON if BLK_DEV_BSG
help
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
@@ -140,6 +144,18 @@ config CHR_DEV_SG
If unsure, say N.
+config BLK_DEV_BSG
+ bool "/dev/bsg support (SG v4)"
+ depends on SCSI
+ default y
+ help
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any SCSI device.
+
+ This option is required by UDEV to access device serial numbers, etc.
+
+ If unsure, say Y.
+
config CHR_DEV_SCH
tristate "SCSI media changer support"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1748d1ec1338..19814c26c908 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -20,7 +20,7 @@ CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
+obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -168,6 +168,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
+scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o
hv_storvsc-y := storvsc_drv.o
@@ -183,7 +184,7 @@ CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
-clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
+clean-files := 53c700_d.h 53c700_u.h
$(obj)/53c700.o: $(obj)/53c700_d.h
@@ -192,9 +193,11 @@ $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
quiet_cmd_bflags = GEN $@
cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
-$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h FORCE
$(call if_changed,bflags)
+targets += scsi_devinfo_tbl.c
+
# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 3baadd068768..a85589a2a8af 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -778,7 +778,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+ if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) {
pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
instance->host_no);
BUG();
@@ -1710,7 +1710,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
count = sun3scsi_dma_xfer_len(hostdata, cmd);
if (count > 0) {
- if (rq_data_dir(cmd->request))
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
cmd->SCp.ptr, count);
else
@@ -2158,7 +2158,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
count = sun3scsi_dma_xfer_len(hostdata, tmp);
if (count > 0) {
- if (rq_data_dir(tmp->request))
+ if (tmp->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
tmp->SCp.ptr, count);
else
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 46b8dffce2dd..c2d6f0a9e0b1 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -25,7 +25,6 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
-#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -1505,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->request->timeout/HZ;
+ timeout = scsi_cmd_to_rq(cmd)->timeout / HZ;
if (timeout == 0)
timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 54eb4d41bc2c..deb32c9f4b3e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -224,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
struct fib *fibptr;
- fibptr = &dev->fibs[scmd->request->tag];
+ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
/*
* Null out fields that depend on being zero at the start of
* each I/O
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index f3377e2ef5fb..ffb391967573 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7423,7 +7423,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
* Set the srb_tag to the command tag + 1, as
* srb_tag '0' is used internally by the chip.
*/
- srb_tag = scp->request->tag + 1;
+ srb_tag = scsi_cmd_to_rq(scp)->tag + 1;
asc_scsi_q->q2.srb_tag = srb_tag;
/*
@@ -7637,7 +7637,7 @@ static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
adv_req_t **adv_reqpp)
{
- u32 srb_tag = scp->request->tag;
+ u32 srb_tag = scsi_cmd_to_rq(scp)->tag;
adv_req_t *reqp;
ADV_SCSI_REQ_Q *scsiqp;
int ret;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1210e61afb18..584a59522038 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -262,11 +262,12 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
void *buf = acmd->data_buffer;
struct req_iterator iter;
struct bio_vec bv;
- rq_for_each_segment(bv, cmd->request, iter) {
+ rq_for_each_segment(bv, rq, iter) {
memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
bv.bv_len);
buf += bv.bv_len;
@@ -447,11 +448,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
#endif
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
void *buf = acmd->data_buffer;
struct req_iterator iter;
struct bio_vec bv;
- rq_for_each_segment(bv, cmd->request, iter) {
+ rq_for_each_segment(bv, rq, iter) {
memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
bv.bv_len);
buf += bv.bv_len;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 462717bbb5b7..4e899ec1477d 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -235,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- rc = -ENOMEM;
- goto free_cmd;
+ return -ENOMEM;
}
sge = nonembedded_sgl(wrb);
@@ -269,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
/* copy the response, if any */
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
- /**
- * This is special case of NTWK_GET_IF_INFO where the size of
- * response is not known. beiscsi_if_get_info checks the return
- * value to free DMA buffer.
- */
- if (rc == -EAGAIN)
- return rc;
-
- /**
- * If FW is busy that is driver timed out, DMA buffer is saved with
- * the tag, only when the cmd completes this buffer is freed.
- */
- if (rc == -EBUSY)
- return rc;
-
-free_cmd:
- dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
- nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -309,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
return 0;
}
+static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *cmd, int rc)
+{
+ /*
+ * If FW is busy the DMA buffer is saved with the tag. When the cmd
+ * completes this buffer is freed.
+ */
+ if (rc == -EBUSY)
+ return;
+
+ dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma);
+}
+
static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
{
struct be_dma_mem *tag_mem;
@@ -344,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
- __beiscsi_eq_delay_compl, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl,
+ NULL, 0);
+ if (rc) {
+ /*
+ * Only free on failure. Async cmds are handled like -EBUSY,
+ * where the completion path frees the buffer for us.
+ */
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ }
+ return rc;
}
/**
@@ -372,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
req->hdr.version = 1;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
&resp, sizeof(resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -449,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
req->ip_addr.ip_type = ip_type;
memcpy(req->ip_addr.addr, gw,
(ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val);
+ return rt_val;
}
int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -499,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
req = nonemb_cmd.va;
req->ip_type = ip_type;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- resp, sizeof(*resp));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp,
+ sizeof(*resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static int
@@ -537,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
"BG_%d : failed to clear IP: rc %d status %d\n",
rc, req->ip_params.ip_record.status);
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -581,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
if (req->ip_params.ip_record.status)
rc = -EINVAL;
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -608,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
reldhcp->interface_hndl = phba->interface_handle;
reldhcp->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : failed to release existing DHCP: %d\n",
@@ -689,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
exit:
kfree(if_info);
return rc;
@@ -762,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : Memory Allocation Failure\n");
- /* Free the DMA memory for the IOCTL issuing */
- dma_free_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd,
+ -ENOMEM);
return -ENOMEM;
}
@@ -781,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
nonemb_cmd.va)->actual_resp_len;
ioctl_size += sizeof(struct be_cmd_req_hdr);
- /* Free the previous allocated DMA memory */
- dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
/* Free the virtual memory */
kfree(*if_info);
- } else
+ } else {
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
break;
+ }
} while (true);
return rc;
}
@@ -806,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
if (rc)
return rc;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- nic, sizeof(*nic));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 43e8a1dafec0..5521469ce678 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1918,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_unlock(&session->back_lock);
- p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
+ p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
rc = -EINVAL;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index fc7197abfcdf..27012908b586 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -618,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
return 0;
}
+struct changer_element_status32 {
+ int ces_type;
+ compat_uptr_t ces_data;
+};
+#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32)
+
static long ch_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -748,7 +754,20 @@ static long ch_ioctl(struct file *file,
return ch_gstatus(ch, ces.ces_type, ces.ces_data);
}
+#ifdef CONFIG_COMPAT
+ case CHIOGSTATUS32:
+ {
+ struct changer_element_status32 ces32;
+ if (copy_from_user(&ces32, argp, sizeof(ces32)))
+ return -EFAULT;
+ if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
+ return -EINVAL;
+
+ return ch_gstatus(ch, ces32.ces_type,
+ compat_ptr(ces32.ces_data));
+ }
+#endif
case CHIOGELEM:
{
struct changer_get_element cge;
@@ -858,59 +877,11 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, cmd, argp);
+ return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
}
}
-#ifdef CONFIG_COMPAT
-
-struct changer_element_status32 {
- int ces_type;
- compat_uptr_t ces_data;
-};
-#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)
-
-static long ch_ioctl_compat(struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- scsi_changer *ch = file->private_data;
- int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
- file->f_flags & O_NDELAY);
- if (retval)
- return retval;
-
- switch (cmd) {
- case CHIOGPARAMS:
- case CHIOGVPARAMS:
- case CHIOPOSITION:
- case CHIOMOVE:
- case CHIOEXCHANGE:
- case CHIOGELEM:
- case CHIOINITELEM:
- case CHIOSVOLTAG:
- /* compatible */
- return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
- case CHIOGSTATUS32:
- {
- struct changer_element_status32 ces32;
- unsigned char __user *data;
-
- if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
- return -EFAULT;
- if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
- return -EINVAL;
-
- data = compat_ptr(ces32.ces_data);
- return ch_gstatus(ch, ces32.ces_type, data);
- }
- default:
- return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
-
- }
-}
-#endif
-
/* ------------------------------------------------------------------------ */
static int ch_probe(struct device *dev)
@@ -1015,9 +986,7 @@ static const struct file_operations changer_fops = {
.open = ch_open,
.release = ch_release,
.unlocked_ioctl = ch_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ch_ioctl_compat,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
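With the 32-bit CHIOGSTATUS32 case folded into ch_ioctl(), every command the driver handles takes a pointer argument, so the generic compat_ptr_ioctl() helper can replace the hand-written compat handler. Its core behaviour (paraphrased from fs/ioctl.c) is to convert the 32-bit user pointer and re-enter the native handler:

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}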
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 56b9ad0a1ca0..3b2eb6ce1fcf 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1786,7 +1786,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
- sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
+ sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
nr = fc_remote_port_chkready(rport);
if (nr) {
@@ -1989,13 +1989,13 @@ inval_scmnd:
csio_info(hw,
"Aborted SCSI command to (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return SUCCESS;
} else {
csio_info(hw,
"Failed to abort SCSI command, (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return FAILED;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 222593bc2afe..b2730e859df8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -433,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
hwq = afu->hwq_rr_count++ % afu->num_hwqs;
break;
case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scp->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
hwq = blk_mq_unique_tag_to_hwq(tag);
break;
case HWQ_MODE_CPU:
@@ -1629,8 +1629,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
struct device *dev = &cfg->dev->dev;
struct pci_dev *pdev = cfg->dev;
- int rc = 0;
- int ro_start, ro_size, i, j, k;
+ int i, k, rc = 0;
+ unsigned int kw_size;
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
@@ -1648,24 +1648,6 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
goto out;
}
- /* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (unlikely(ro_start < 0)) {
- dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
- rc = -ENODEV;
- goto out;
- }
-
- /* Get the read only section size, cap when extends beyond read VPD */
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (unlikely((i + j) > vpd_size)) {
- dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
- __func__, (i + j), vpd_size);
- ro_size = vpd_size - i;
- }
-
/*
* Find the offset of the WWPN tag within the read only
* VPD data and validate the found field (partials are
@@ -1681,11 +1663,9 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* ports programmed and operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
-
- i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (i < 0) {
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ wwpn_vpd_tags[k], &kw_size);
+ if (i == -ENOENT) {
if (wwpn_vpd_required)
dev_err(dev, "%s: Port %d WWPN not found\n",
__func__, k);
@@ -1693,9 +1673,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
continue;
}
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+ if (i < 0 || kw_size != WWPN_LEN) {
dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
__func__, k);
rc = -ENODEV;
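The rewritten read_vpd() leans on the newer PCI VPD API: pci_vpd_find_ro_info_keyword() locates a keyword within the read-only VPD section and reports the size of its data field in one call, replacing the manual tag walking and offset arithmetic deleted above. A usage sketch under the signature visible in this hunk:

unsigned int kw_size;
int off;

/* returns the offset of the keyword's data, or a negative errno */
off = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
				   wwpn_vpd_tags[k], &kw_size);
if (off >= 0 && kw_size == WWPN_LEN)
	memcpy(tmp_buf, &vpd_data[off], WWPN_LEN);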
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index a18a4a08f049..7af96d14c9bc 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -652,7 +652,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
msg[2] = 0;
msg[3]= 0;
/* Add 1 to avoid firmware treating it as invalid command */
- msg[4] = cmd->request->tag + 1;
+ msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
if (pHba->host)
spin_lock_irq(pHba->host->host_lock);
rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -2236,7 +2236,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
msg[2] = 0;
/* Add 1 to avoid firmware treating it as invalid command */
- msg[3] = cmd->request->tag + 1;
+ msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
// Our cards use the transaction context as the tag for queueing
// Adaptec/DPT Private stuff
msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h
index dab8eac4f243..0e3c931db7c2 100644
--- a/drivers/scsi/elx/efct/efct_driver.h
+++ b/drivers/scsi/elx/efct/efct_driver.h
@@ -10,7 +10,6 @@
/***************************************************************************
* OS specific includes
*/
-#include <stdarg.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index e0d798d6baee..bb3b460dc0bc 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -780,7 +780,7 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
{
struct efct_lio_vport *lio_vport;
struct efct *efct;
- int ret = -1;
+ int ret;
u64 p_wwpn, npiv_wwpn, npiv_wwnn;
char *p, *pbuf, tmp[128];
struct efct_lio_vport_list_t *vport_list;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 762cc8bd2653..f8afbfb468dc 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -107,7 +107,7 @@ static void fnic_cleanup_io(struct fnic *fnic);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
struct scsi_cmnd *sc)
{
- u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+ u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
return &fnic->io_req_lock[hash];
}
@@ -390,7 +390,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
(rp->flags & FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
- fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+ fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
0, exch_flags, io_req->sgl_cnt,
SCSI_SENSE_BUFFERSIZE,
io_req->sgl_list_pa,
@@ -422,6 +422,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
+ const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
@@ -511,8 +512,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, sc->cmnd[0],
- sg_count, CMD_STATE(sc));
+ tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -571,7 +571,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, 0, 0,
+ tag, sc, 0, 0, 0,
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -603,8 +603,7 @@ out:
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, io_req,
- sg_count, cmd_trace,
+ tag, sc, io_req, sg_count, cmd_trace,
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* if only we issued IO, will we have the io lock */
@@ -1364,6 +1363,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
bool reserved)
{
+ const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
@@ -1371,7 +1371,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
+ io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1413,7 +1413,7 @@ cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
- sc->request->tag, sc, (jiffies - start_time));
+ tag, sc, jiffies - start_time);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1425,10 +1425,10 @@ cleanup_scsi_cmd:
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- sc->request->tag, sc);
+ tag, sc);
FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, sc->request->tag, sc,
+ sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
@@ -1566,7 +1566,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
- int abt_tag = sc->request->tag;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1727,6 +1727,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
*/
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -1741,7 +1742,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
- int tag;
+ const int tag = rq->tag;
unsigned long abt_issued_time;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1757,7 +1758,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device));
- tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
@@ -1842,8 +1842,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
- fc_lun.scsi_lun, io_req)) {
+ if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
+ io_req)) {
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
@@ -1943,8 +1943,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
}
fnic_abort_cmd_end:
- FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
@@ -1994,7 +1993,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
- fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+ fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2025,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
struct scsi_device *lun_dev = iter_data->lun_dev;
- int abt_tag = sc->request->tag;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -2206,14 +2205,15 @@ clean_pending_aborts_end:
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct request_queue *q = sc->request->q;
+ struct request *rq = scsi_cmd_to_rq(sc);
+ struct request_queue *q = rq->q;
struct request *dummy;
dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(dummy))
return SCSI_NO_TAG;
- sc->tag = sc->request->tag = dummy->tag;
+ rq->tag = dummy->tag;
sc->host_scribble = (unsigned char *)dummy;
return dummy->tag;
@@ -2238,6 +2238,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
*/
int fnic_device_reset(struct scsi_cmnd *sc)
{
+ struct request *rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -2250,7 +2251,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
- int tag = 0;
+ int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
bool new_sc = 0;
@@ -2284,7 +2285,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
- tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
* Really should fix the midlayer to pass in a proper
@@ -2458,8 +2458,7 @@ fnic_device_reset_clean:
}
fnic_device_reset_end:
- FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3a903e8e0384..9515c45affa5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -185,7 +185,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
void *bitmap = hisi_hba->slot_index_tags;
if (scsi_cmnd)
- return scsi_cmnd->request->tag;
+ return scsi_cmd_to_rq(scsi_cmnd)->tag;
spin_lock(&hisi_hba->lock);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
@@ -449,7 +449,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
unsigned int dq_index;
u32 blk_tag;
- blk_tag = blk_mq_unique_tag(scmd->request);
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
*dq_pointer = dq = &hisi_hba->dq[dq_index];
} else {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a4885d03afe2..3ab669dc806f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1153,7 +1153,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
{
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
unsigned int interval = scsi_prot_interval(scsi_cmnd);
- u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+ u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));
switch (prot_op) {
case SCSI_PROT_READ_INSERT:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f135a10f582b..3faa87fa296a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5686,7 +5686,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
- BUG_ON(cmd->request->tag < 0);
+ BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
dev = cmd->device->hostdata;
if (!dev) {
@@ -5729,7 +5729,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
* and is therefore a brand-new command.
*/
if (likely(cmd->retries == 0 &&
- !blk_rq_is_passthrough(cmd->request) &&
+ !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
h->acciopath_status)) {
/* Submit with the retry_pending flag unset. */
rc = hpsa_ioaccel_submit(h, c, cmd, false);
@@ -5894,7 +5894,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
*/
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
- int idx = scmd->request->tag;
+ int idx = scsi_cmd_to_rq(scmd)->tag;
if (idx < 0)
return idx;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 935b01ee44b7..1f1586ad48fe 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1926,7 +1926,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct ibmvfc_cmd *vfc_cmd;
struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
- u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
+ u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
u16 scsi_channel;
int rc;
@@ -1956,7 +1956,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
if (cmnd->flags & SCMD_TAGGED) {
- vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
+ vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
@@ -3292,14 +3292,18 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
int done = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (time >= (init_timeout * HZ)) {
+ if (!vhost->scan_timeout)
+ done = 1;
+ else if (time >= (vhost->scan_timeout * HZ)) {
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
- "continuing initialization\n", init_timeout);
+ "continuing initialization\n", vhost->scan_timeout);
done = 1;
}
- if (vhost->scan_complete)
+ if (vhost->scan_complete) {
+ vhost->scan_timeout = init_timeout;
done = 1;
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
}
@@ -6084,6 +6088,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
vhost->using_channels = 0;
vhost->do_enquiry = 1;
+ vhost->scan_timeout = 0;
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 92fb889d7eb0..3718406e0988 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -876,6 +876,7 @@ struct ibmvfc_host {
int reinit;
int delay_init;
int scan_complete;
+ int scan_timeout;
int logged_in;
int mq_enabled;
int using_channels;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e6a3eaaa57d9..50df7dd9cb91 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1072,7 +1072,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->request->timeout/HZ);
+ scsi_cmd_to_rq(cmnd)->timeout / HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8b33c9871484..cdd94fb2aab7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3735,7 +3735,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->request->timeout;
+ TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index e1ff79464131..fcaa84a3c210 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -341,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
tc->reserved_E8_0 = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_gen = 0;
}
@@ -369,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
tc->app_tag_gen = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_verify = 0;
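As in the zfcp hunk earlier, the open-coded 'scsi_get_lba(scmd) & 0xffffffff' becomes scsi_prot_ref_tag(), which derives the T10 PI reference tag from the command's request and accounts for the protection interval instead of assuming it equals the logical block size. Paraphrasing the accessor:

static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
	struct request *rq = blk_mq_rq_from_pdu(scmd);

	return t10_pi_ref_tag(rq);	/* 32-bit seeded reference tag */
}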
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 6d14a7a94d0b..86fe19e0468d 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -134,7 +134,7 @@ lasi700_probe(struct parisc_device *dev)
return -ENODEV;
}
-static int __exit
+static void __exit
lasi700_driver_remove(struct parisc_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
@@ -146,8 +146,6 @@ lasi700_driver_remove(struct parisc_device *dev)
free_irq(host->irq, host);
iounmap(hostdata->base);
kfree(hostdata);
-
- return 0;
}
static struct parisc_driver lasi700_driver __refdata = {
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 052ee3a26f6e..c640535d1ac0 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS
tristate "SAS Domain Transport Attributes"
depends on SCSI
select SCSI_SAS_ATTRS
- select BLK_DEV_BSGLIB
help
This provides transport specific helpers for SAS drivers which
use the domain device construct (like the aic94xx).
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index e63a54f5ab8c..9dc32736cf21 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -18,4 +18,4 @@ libsas-y += sas_init.o \
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
-ccflags-y := -DDEBUG
+ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4aa1fda95f35..a315715b3622 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -20,8 +20,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
@@ -596,7 +596,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- blk_abort_request(qc->scsicmd->request);
+ blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
return;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index dd205414e505..12e1e36d7c04 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -16,7 +16,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e00688540219..c2150a818423 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -18,7 +18,7 @@
#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static int sas_discover_expander(struct domain_device *dev);
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index eca2a6bf3601..32cdc969b736 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -14,7 +14,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 2b0f98ca6ec3..80592f53017a 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -19,7 +19,7 @@
#include "sas_internal.h"
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 4ca4b1f30bd0..a0d592d11dfb 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Phy events ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e3d03d744713..67b429dcf1ff 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
{
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ee44a0d7730b..08ffb8788290 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -22,9 +22,9 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
-#include "../scsi_priv.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
+#include "scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
@@ -908,7 +908,7 @@ void sas_task_abort(struct sas_task *task)
if (dev_is_sata(task->dev))
sas_ata_task_abort(task);
else
- blk_abort_request(sc->request);
+ blk_abort_request(scsi_cmd_to_rq(sc));
}
int sas_slave_alloc(struct scsi_device *sdev)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 17028861234b..befeb7c34290 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -114,6 +114,12 @@ struct lpfc_sli2_slim;
#define LPFC_MBX_NO_WAIT 0
#define LPFC_MBX_WAIT 1
+#define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005
+#define LPFC_PORT_CFG_NAME "/cfg/port.cfg"
+
+#define lpfc_rangecheck(val, min, max) \
+ ((uint)(val) >= (uint)(min) && (val) <= (max))
+
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
@@ -403,6 +409,160 @@ struct lpfc_trunk_link {
link3;
};
+/* Format of congestion module parameters */
+struct lpfc_cgn_param {
+ uint32_t cgn_param_magic;
+ uint8_t cgn_param_version; /* version 1 */
+ uint8_t cgn_param_mode; /* 0=off 1=managed 2=monitor only */
+#define LPFC_CFG_OFF 0
+#define LPFC_CFG_MANAGED 1
+#define LPFC_CFG_MONITOR 2
+ uint8_t cgn_rsvd1;
+ uint8_t cgn_rsvd2;
+ uint8_t cgn_param_level0;
+ uint8_t cgn_param_level1;
+ uint8_t cgn_param_level2;
+ uint8_t byte11;
+ uint8_t byte12;
+ uint8_t byte13;
+ uint8_t byte14;
+ uint8_t byte15;
+};
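
The magic number and mode bounds above give the driver a cheap validity check when it pulls this block out of flash (LPFC_PORT_CFG_NAME). A minimal sketch of such a check, using only the constants defined here; the helper name is hypothetical and the in-tree validation (done elsewhere in the driver) may differ:

    /* Hypothetical sketch: sanity-check a lpfc_cgn_param blob read from flash */
    static bool cgn_param_valid_sketch(const struct lpfc_cgn_param *p)
    {
        if (p->cgn_param_magic != LPFC_CFG_PARAM_MAGIC_NUM)
            return false;   /* not a congestion config object */
        /* mode must be one of OFF / MANAGED / MONITOR */
        return lpfc_rangecheck(p->cgn_param_mode, LPFC_CFG_OFF,
                               LPFC_CFG_MONITOR);
    }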
+
+/* Max number of days of congestion data */
+#define LPFC_MAX_CGN_DAYS 10
+
+/* Format of congestion buffer info
+ * This structure defines memory that's allocated and registered with
+ * the HBA firmware. When adding or removing fields from this structure,
+ * the alignment must match the HBA firmware.
+ */
+
+struct lpfc_cgn_info {
+ /* Header */
+ __le16 cgn_info_size; /* is sizeof(struct lpfc_cgn_info) */
+ uint8_t cgn_info_version; /* represents format of structure */
+#define LPFC_CGN_INFO_V1 1
+#define LPFC_CGN_INFO_V2 2
+#define LPFC_CGN_INFO_V3 3
+ uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */
+ uint8_t cgn_info_detect;
+ uint8_t cgn_info_action;
+ uint8_t cgn_info_level0;
+ uint8_t cgn_info_level1;
+ uint8_t cgn_info_level2;
+
+ /* Start Time */
+ uint8_t cgn_info_month;
+ uint8_t cgn_info_day;
+ uint8_t cgn_info_year;
+ uint8_t cgn_info_hour;
+ uint8_t cgn_info_minute;
+ uint8_t cgn_info_second;
+
+ /* minute / hours / daily indices */
+ uint8_t cgn_index_minute;
+ uint8_t cgn_index_hour;
+ uint8_t cgn_index_day;
+
+ __le16 cgn_warn_freq;
+ __le16 cgn_alarm_freq;
+ __le16 cgn_lunq;
+ uint8_t cgn_pad1[8];
+
+ /* Driver Information */
+ __le16 cgn_drvr_min[60];
+ __le32 cgn_drvr_hr[24];
+ __le32 cgn_drvr_day[LPFC_MAX_CGN_DAYS];
+
+ /* Congestion Warnings */
+ __le16 cgn_warn_min[60];
+ __le32 cgn_warn_hr[24];
+ __le32 cgn_warn_day[LPFC_MAX_CGN_DAYS];
+
+ /* Latency Information */
+ __le32 cgn_latency_min[60];
+ __le32 cgn_latency_hr[24];
+ __le32 cgn_latency_day[LPFC_MAX_CGN_DAYS];
+
+ /* Bandwidth Information */
+ __le16 cgn_bw_min[60];
+ __le16 cgn_bw_hr[24];
+ __le16 cgn_bw_day[LPFC_MAX_CGN_DAYS];
+
+ /* Congestion Alarms */
+ __le16 cgn_alarm_min[60];
+ __le32 cgn_alarm_hr[24];
+ __le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
+
+ /* Start of congestion statistics */
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+#define LPFC_CGN_STAT_SIZE 48
+#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
+ LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+
+ __le32 cgn_info_crc;
+#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
+#define LPFC_CGN_CRC32_SEED 0xFFFFFFFF
+};
+
+#define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \
+ sizeof(uint32_t))
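
The trailing cgn_info_crc covers the first LPFC_CGN_INFO_SZ bytes of the buffer, i.e. everything but the CRC word itself, seeded with LPFC_CGN_CRC32_SEED. The magic number 0x1EDC6F41 is the CRC-32C (Castagnoli) polynomial. Below is a bitwise sketch matching the lpfc_cgn_calc_crc32() prototype declared in lpfc_crtn.h; the in-tree implementation may differ in bit order or final inversion:

    /* Sketch: MSB-first bitwise CRC over the congestion info block */
    static uint32_t cgn_crc32_sketch(const void *bufp, uint32_t sz, uint32_t seed)
    {
        const uint8_t *b = bufp;
        uint32_t crc = seed;
        uint32_t i;
        int bit;

        for (i = 0; i < sz; i++) {
            crc ^= (uint32_t)b[i] << 24;
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 0x80000000) ?
                      (crc << 1) ^ LPFC_CGN_CRC32_MAGIC_NUMBER :
                      crc << 1;
        }
        return crc;
    }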
+
+struct lpfc_cgn_stat {
+ atomic64_t total_bytes;
+ atomic64_t rcv_bytes;
+ atomic64_t rx_latency;
+#define LPFC_CGN_NOT_SENT 0xFFFFFFFFFFFFFFFFLL
+ atomic_t rx_io_cnt;
+};
+
+struct lpfc_cgn_acqe_stat {
+ atomic64_t alarm;
+ atomic64_t warn;
+};
+
struct lpfc_vport {
struct lpfc_hba *phba;
struct list_head listentry;
@@ -869,7 +1029,10 @@ struct lpfc_hba {
* capability
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
+#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
+#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
+#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
@@ -922,7 +1085,6 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint8_t fcp_embed_io;
- uint8_t nvme_support; /* Firmware supports NVME */
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
@@ -1121,6 +1283,7 @@ struct lpfc_hba {
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
spinlock_t hbalock;
+ struct work_struct unblock_request_work; /* SCSI layer unblock IOs */
/* dma_mem_pools */
struct dma_pool *lpfc_sg_dma_buf_pool;
@@ -1194,6 +1357,8 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_cgn_buffer;
+ struct dentry *debug_rx_monitor;
struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
@@ -1344,6 +1509,76 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+ /* CMF objects */
+ struct lpfc_cgn_stat __percpu *cmf_stat;
+ uint32_t cmf_interval_rate; /* timer interval limit in ms */
+ uint32_t cmf_timer_cnt;
+#define LPFC_CMF_INTERVAL 90
+ uint64_t cmf_link_byte_count;
+ uint64_t cmf_max_line_rate;
+ uint64_t cmf_max_bytes_per_interval;
+ uint64_t cmf_last_sync_bw;
+#define LPFC_CMF_BLK_SIZE 512
+ struct hrtimer cmf_timer;
+ atomic_t cmf_bw_wait;
+ atomic_t cmf_busy;
+ atomic_t cmf_stop_io; /* To block requests and stop IOs */
+ uint32_t cmf_active_mode;
+ uint32_t cmf_info_per_interval;
+#define LPFC_MAX_CMF_INFO 32
+ struct timespec64 cmf_latency; /* Interval congestion timestamp */
+ uint32_t cmf_last_ts; /* Interval congestion time (ms) */
+ uint32_t cmf_active_info;
+
+ /* Signal / FPIN handling for Congestion Mgmt */
+ u8 cgn_reg_fpin; /* Negotiated value from RDF */
+ u8 cgn_init_reg_fpin; /* Initial value from READ_CONFIG */
+#define LPFC_CGN_FPIN_NONE 0x0
+#define LPFC_CGN_FPIN_WARN 0x1
+#define LPFC_CGN_FPIN_ALARM 0x2
+#define LPFC_CGN_FPIN_BOTH (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
+
+ u8 cgn_reg_signal; /* Negotiated value from EDC */
+ u8 cgn_init_reg_signal; /* Initial value from READ_CONFIG */
+ /* cgn_reg_signal and cgn_init_reg_signal use
+ * enum fc_edc_cg_signal_cap_types
+ */
+ u16 cgn_fpin_frequency;
+#define LPFC_FPIN_INIT_FREQ 0xffff
+ u32 cgn_sig_freq;
+ u32 cgn_acqe_cnt;
+
+ /* RX monitor handling for CMF */
+ struct rxtable_entry *rxtable; /* RX_monitor information */
+ atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
+ atomic_t rxtable_idx_tail;
+ atomic_t rx_max_read_cnt; /* Maximum read bytes */
+ uint64_t rx_block_cnt;
+
+ /* Congestion parameters from flash */
+ struct lpfc_cgn_param cgn_p;
+
+ /* Statistics counter for ACQE cgn alarms and warnings */
+ struct lpfc_cgn_acqe_stat cgn_acqe_stat;
+
+ /* Congestion buffer information */
+ struct lpfc_dmabuf *cgn_i; /* Congestion Info buffer */
+ atomic_t cgn_fabric_warn_cnt; /* Total warning cgn events for info */
+ atomic_t cgn_fabric_alarm_cnt; /* Total alarm cgn events for info */
+ atomic_t cgn_sync_warn_cnt; /* Total warning events for SYNC wqe */
+ atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */
+ atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */
+ atomic_t cgn_latency_evt_cnt;
+ struct timespec64 cgn_daily_ts;
+ atomic64_t cgn_latency_evt; /* Avg latency per minute */
+ unsigned long cgn_evt_timestamp;
+#define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */
+ uint32_t cgn_evt_minute;
+#define LPFC_SEC_MIN 60
+#define LPFC_MIN_HOUR 60
+#define LPFC_HOUR_DAY 24
+#define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY)
struct hlist_node cpuhp; /* used for cpuhp per hba callback */
struct timer_list cpuhp_poll_timer;
@@ -1364,6 +1599,22 @@ struct lpfc_hba {
struct dbg_log_ent dbg_log[DBG_LOG_SZ];
};
+#define LPFC_MAX_RXMONITOR_ENTRY 800
+#define LPFC_MAX_RXMONITOR_DUMP 32
+struct rxtable_entry {
+ uint64_t total_bytes; /* Total no of read bytes requested */
+ uint64_t rcv_bytes; /* Total no of read bytes completed */
+ uint64_t avg_io_size;
+ uint64_t avg_io_latency;/* Average io latency in microseconds */
+ uint64_t max_read_cnt; /* Maximum read bytes */
+ uint64_t max_bytes_per_interval;
+ uint32_t cmf_busy;
+ uint32_t cmf_info; /* CMF_SYNC_WQE info */
+ uint32_t io_cnt;
+ uint32_t timer_utilization;
+ uint32_t timer_interval;
+};
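
The rxtable ring is shared between the CMF timer path (producer) and the debugfs reader added later in this patch: the producer parks rxtable_idx_head at the out-of-range sentinel LPFC_RXMONITOR_TABLE_IN_USE while it fills a slot, and lpfc_rx_monitor_read() spins until the head is valid again. A producer-side sketch under that assumption; the helper is hypothetical and the real update logic may differ:

    /* Hypothetical sketch: claim the head slot, fill it, publish the new head */
    static void rxtable_push_sketch(struct lpfc_hba *phba,
                                    const struct rxtable_entry *sample)
    {
        int head = atomic_xchg(&phba->rxtable_idx_head,
                               LPFC_RXMONITOR_TABLE_IN_USE);

        phba->rxtable[head] = *sample;          /* fill the claimed slot */
        head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
        atomic_set(&phba->rxtable_idx_head, head);  /* unpark the head */
    }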
+
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index eb88aaaf36eb..b35bf70a8c0d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -57,6 +57,8 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
+#define LPFC_MAX_INFO_TMP_LEN 100
+#define LPFC_INFO_MORE_STR "\nCould be more info...\n"
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -112,6 +114,186 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
return;
}
+static ssize_t
+lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_cgn_info *cp = NULL;
+ struct lpfc_cgn_stat *cgs;
+ int len = 0;
+ int cpu;
+ u64 rcv, total;
+ char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
+
+ if (phba->cgn_i)
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ scnprintf(tmp, sizeof(tmp),
+ "Congestion Mgmt Info: E2Eattr %d Ver %d "
+ "CMF %d cnt %d\n",
+ phba->sli4_hba.pc_sli4_params.mi_ver,
+ cp ? cp->cgn_info_version : 0,
+ phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
+
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf)
+ goto buffer_done;
+
+ switch (phba->cgn_init_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:WARN ");
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:WARN|ALARM ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:NONE ");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_init_reg_fpin) {
+ case LPFC_CGN_FPIN_WARN:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN\n");
+ break;
+ case LPFC_CGN_FPIN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:ALARM\n");
+ break;
+ case LPFC_CGN_FPIN_BOTH:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN|ALARM\n");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:NONE\n");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:WARN ");
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:WARN|ALARM ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:NONE ");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_reg_fpin) {
+ case LPFC_CGN_FPIN_WARN:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ case LPFC_CGN_FPIN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ case LPFC_CGN_FPIN_BOTH:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
+ switch (phba->cmf_active_mode) {
+ case LPFC_CFG_OFF:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
+ break;
+ case LPFC_CFG_MANAGED:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
+ break;
+ case LPFC_CFG_MONITOR:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
+ break;
+ case LPFC_CFG_MANAGED:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
+ break;
+ case LPFC_CFG_MONITOR:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ total = 0;
+ rcv = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_read(&cgs->total_bytes);
+ rcv += atomic64_read(&cgs->rcv_bytes);
+ }
+
+ scnprintf(tmp, sizeof(tmp),
+ "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
+ atomic_read(&phba->cmf_busy),
+ phba->cmf_active_info, rcv, total);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "Port_speed:%d Link_byte_cnt:%ld "
+ "Max_byte_per_interval:%ld\n",
+ lpfc_sli_port_speed_get(phba),
+ (unsigned long)phba->cmf_link_byte_count,
+ (unsigned long)phba->cmf_max_bytes_per_interval);
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ if (unlikely(len >= (PAGE_SIZE - 1))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6312 Catching potential buffer "
+ "overflow > PAGE_SIZE = %lu bytes\n",
+ PAGE_SIZE);
+ strscpy(buf + PAGE_SIZE - 1 -
+ strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
+ LPFC_INFO_MORE_STR,
+ strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
+ + 1);
+ }
+ return len;
+}
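
Since cmf_info is a plain read-only sysfs attribute (registered for both the HBA and each vport further down), the negotiated congestion state can be inspected from userspace. A minimal reader, assuming the usual scsi_host sysfs path; the host number varies per system:

    /* Sketch: dump the cmf_info attribute for host0 (path is an assumption) */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/class/scsi_host/host0/cmf_info", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }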
+
/**
* lpfc_drvr_version_show - Return the Emulex driver string with version number
* @dev: class unused variable.
@@ -168,7 +350,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
- char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+ char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -512,9 +694,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
"6314 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
- strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
- LPFC_NVME_INFO_MORE_STR,
- sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
+ strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+ LPFC_INFO_MORE_STR,
+ sizeof(LPFC_INFO_MORE_STR) + 1);
}
return len;
@@ -2248,11 +2430,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
-static inline bool lpfc_rangecheck(uint val, uint min, uint max)
-{
- return val >= min && val <= max;
-}
-
/**
* lpfc_enable_bbcr_set: Sets an attribute value.
* @phba: pointer to the adapter structure.
@@ -2641,6 +2818,7 @@ static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
+static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
#define WWN_SZ 8
@@ -4038,6 +4216,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
const char *val_buf = buf;
int err;
uint32_t prev_val;
+ u8 sli_family, if_type;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -4061,13 +4240,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
/*
* The 'topology' is not a configurable parameter if :
* - persistent topology enabled
- * - G7/G6 with no private loop support
+ * - ASIC_GEN_NUM >= 0xC, with no private loop support
*/
-
+ sli_family = bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
- (!phba->sli4_hba.pc_sli4_params.pls &&
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
@@ -5412,9 +5594,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
*/
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
@@ -6146,6 +6328,19 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
*/
LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+/* Signaling module parameters */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
+
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
/*
* lpfc_enable_dpp: Enable DPP on G7
* 0 = DPP on G7 disabled
@@ -6320,6 +6515,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
&dev_attr_lpfc_enable_mi,
+ &dev_attr_cmf_info,
&dev_attr_lpfc_max_vmid,
&dev_attr_lpfc_vmid_inactivity_timeout,
&dev_attr_lpfc_vmid_app_header,
@@ -6350,6 +6546,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
+ &dev_attr_cmf_info,
NULL,
};
@@ -6741,6 +6938,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LPFC_LINK_SPEED_128GHZ:
fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
break;
+ case LPFC_LINK_SPEED_256GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
@@ -6908,6 +7108,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
hs->error_frames = pmb->un.varRdLnk.crcCnt;
+ hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
+ hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
+
hs->link_failure_count -= lso->link_failure_count;
hs->loss_of_sync_count -= lso->loss_of_sync_count;
hs->loss_of_signal_count -= lso->loss_of_signal_count;
@@ -7019,6 +7222,12 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+ memset(&shost_to_fc_host(shost)->fpin_stats, 0,
+ sizeof(shost_to_fc_host(shost)->fpin_stats));
+
psli->stats_start = ktime_get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -7452,6 +7661,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
lpfc_enable_mi_init(phba, lpfc_enable_mi);
+ phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
+ lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
+ lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
phba->nvmet_support = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 38cfe1bc6a4d..fdf08cb57207 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5751,6 +5751,92 @@ job_error:
}
+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct get_cgnbuf_info_req *cgnbuf_req;
+ struct lpfc_cgn_info *cp;
+ uint8_t *cgn_buff;
+ int size, cinfosz;
+ int rc = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct get_cgnbuf_info_req)) {
+ rc = -ENOMEM;
+ goto job_exit;
+ }
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf) {
+ rc = -ENOENT;
+ goto job_exit;
+ }
+
+ if (!phba->cgn_i || !phba->cgn_i->virt) {
+ rc = -ENOENT;
+ goto job_exit;
+ }
+
+ cp = phba->cgn_i->virt;
+ if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+ rc = -EPERM;
+ goto job_exit;
+ }
+
+ cgnbuf_req = (struct get_cgnbuf_info_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+ /* For reset or size == 0 */
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+ lpfc_init_congestion_stat(phba);
+ goto job_exit;
+ }
+
+ /* We don't want to include the CRC at the end */
+ cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+ size = cgnbuf_req->read_size;
+ if (!size)
+ goto job_exit;
+
+ if (size < cinfosz) {
+ /* Just copy back what we can */
+ cinfosz = size;
+ rc = -E2BIG;
+ }
+
+ /* Allocate memory to read congestion info */
+ cgn_buff = vmalloc(cinfosz);
+ if (!cgn_buff) {
+ rc = -ENOMEM;
+ goto job_exit;
+ }
+
+ memcpy(cgn_buff, cp, cinfosz);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ cgn_buff, cinfosz);
+
+ vfree(cgn_buff);
+
+job_exit:
+ bsg_reply->result = rc;
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2724 GET CGNBUF error: %d\n", rc);
+ return rc;
+}
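
Userspace reaches lpfc_get_cgnbuf_info() through the FC BSG vendor path, with a get_cgnbuf_info_req (defined in lpfc_bsg.h below) as the vendor payload. A sketch of the request layout only, assuming the FC_BSG_HST_VENDOR framing from <uapi/scsi/scsi_bsg_fc.h>; SG_IO v4 submission and any vendor_id requirements are omitted and may differ:

    /* Sketch: build the vendor request that lpfc_get_cgnbuf_info() parses */
    static struct fc_bsg_request *build_cgnbuf_req_sketch(uint32_t read_size,
                                                          int reset)
    {
        size_t len = sizeof(struct fc_bsg_request) +
                     sizeof(struct get_cgnbuf_info_req);
        struct fc_bsg_request *req = calloc(1, len);
        struct get_cgnbuf_info_req *cgn;

        if (!req)
            return NULL;
        req->msgcode = FC_BSG_HST_VENDOR;
        cgn = (struct get_cgnbuf_info_req *)
              req->rqst_data.h_vendor.vendor_cmd;
        cgn->command = LPFC_BSG_VENDOR_GET_CGNBUF_INFO;
        cgn->read_size = read_size;     /* 0: nothing copied back */
        cgn->reset = reset ? LPFC_BSG_CGN_RESET_STAT : 0;
        return req;
    }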
+
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
@@ -5813,6 +5899,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
rc = lpfc_get_trunk_info(job);
break;
+ case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+ rc = lpfc_get_cgnbuf_info(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 2dc71243775d..749d6c43cfce 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -43,6 +43,7 @@
#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
#define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20
+#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO 21
struct set_ct_event {
uint32_t command;
@@ -386,6 +387,13 @@ struct get_trunk_info_req {
uint32_t command;
};
+struct get_cgnbuf_info_req {
+ uint32_t command;
+ uint32_t read_size;
+ uint32_t reset;
+#define LPFC_BSG_CGN_RESET_STAT 1
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 737483c3f01d..c512f4199142 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -58,6 +58,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_congestion_buf(struct lpfc_hba *phba);
+int lpfc_unreg_congestion_buf(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -74,6 +76,20 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
+void lpfc_cmf_signal_init(struct lpfc_hba *phba);
+void lpfc_cmf_start(struct lpfc_hba *phba);
+void lpfc_cmf_stop(struct lpfc_hba *phba);
+void lpfc_init_congestion_stat(struct lpfc_hba *phba);
+void lpfc_init_congestion_buf(struct lpfc_hba *phba);
+int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
+uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+int lpfc_config_cgn_signal(struct lpfc_hba *phba);
+int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
+void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+void lpfc_unblock_requests(struct lpfc_hba *phba);
+void lpfc_block_requests(struct lpfc_hba *phba);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -87,6 +103,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
@@ -141,6 +159,8 @@ int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
+int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry);
+void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -213,6 +233,9 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
+ uint32_t len);
+
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
void lpfc_sli4_poll_hbtimer(struct timer_list *t);
@@ -459,6 +482,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
void lpfc_create_static_vport(struct lpfc_hba *);
void lpfc_stop_hba_timers(struct lpfc_hba *);
void lpfc_stop_port(struct lpfc_hba *);
+int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz);
+int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz,
+ struct Scsi_Host *shost);
void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
@@ -605,6 +631,10 @@ extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
+extern int lpfc_acqe_cgn_frequency;
+extern int lpfc_fabric_cgn_frequency;
+extern int lpfc_use_cgn_signal;
+
extern union lpfc_wqe128 lpfc_iread_cmd_template;
extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 610b6dabb3b5..dfcb7d4bd7fa 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2288,6 +2288,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* No retry on Vendor, RPA only done on physical port */
if (phba->link_flag & LS_CT_VEN_RPA) {
phba->link_flag &= ~LS_CT_VEN_RPA;
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ return;
lpfc_printf_log(phba, KERN_ERR,
LOG_DISCOVERY | LOG_ELS,
"6460 VEN FDMI RPA failure\n");
@@ -2332,24 +2334,29 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case SLI_MGMT_RPA:
if (vport->port_type == LPFC_PHYSICAL_PORT &&
- phba->cfg_enable_mi &&
- phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+ phba->sli4_hba.pc_sli4_params.mi_ver) {
/* mi is only for the physical port, no vports */
if (phba->link_flag & LS_CT_VEN_RPA) {
lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY | LOG_ELS,
+ LOG_DISCOVERY | LOG_ELS |
+ LOG_CGN_MGMT,
"6449 VEN RPA FDMI Success\n");
phba->link_flag &= ~LS_CT_VEN_RPA;
break;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6210 Issue Vendor MI FDMI %x\n",
+ phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ /* CGN is only for the physical port, no vports */
if (lpfc_fdmi_cmd(vport, ndlp, cmd,
LPFC_FDMI_VENDOR_ATTR_mi) == 0)
phba->link_flag |= LS_CT_VEN_RPA;
lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY | LOG_ELS,
"6458 Send MI FDMI:%x Flag x%x\n",
- phba->sli4_hba.pc_sli4_params.mi_value,
+ phba->sli4_hba.pc_sli4_params.mi_ver,
phba->link_flag);
} else {
lpfc_printf_log(phba, KERN_INFO,
@@ -2846,6 +2853,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (phba->lmt & LMT_256Gb)
+ ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_128Gb)
ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_64Gb)
@@ -2927,6 +2936,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
case LPFC_LINK_SPEED_128GHZ:
ae->un.AttrInt = HBA_PORTSPEED_128GFC;
break;
+ case LPFC_LINK_SPEED_256GHZ:
+ ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ break;
default:
ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
break;
@@ -3343,7 +3355,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
sprintf(mibrevision, "ELXE2EM:%04d",
- phba->sli4_hba.pc_sli4_params.mi_value);
+ phba->sli4_hba.pc_sli4_params.mi_ver);
strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -3884,9 +3896,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort
* @vport: pointer to a host virtual N_Port data structure.
- * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
- * cmdcode: FDMI command to send
- * mask: Mask of HBA or PORT Attributes to send
+ * @cmdcode: application server command code to send
+ * @vmid: pointer to vmid info structure
*
* Builds and sends a FDMI command using the CT subsystem.
*/
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 6ff85ae57e79..bd6d459afce5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5429,6 +5429,180 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
+static int
+lpfc_cgn_buffer_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+ uint32_t *ptr;
+ int cnt, len = 0;
+
+ if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Mgmt is not supported\n");
+ goto out;
+ }
+ ptr = (uint32_t *)phba->cgn_i->virt;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Buffer Header\n");
+ /* Dump the first 32 bytes */
+ cnt = 32;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "000: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+ *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7));
+ ptr += 8;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Buffer Data\n");
+ while (cnt < sizeof(struct lpfc_cgn_info)) {
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ break;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%03x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
+ cnt, *ptr, *(ptr + 1), *(ptr + 2),
+ *(ptr + 3), *(ptr + 4), *(ptr + 5),
+ *(ptr + 6), *(ptr + 7));
+ cnt += 32;
+ ptr += 8;
+ }
+out:
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_cgn_buffer_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+static int
+lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_rx_monitor_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+ struct rxtable_entry *entry;
+ int i, len = 0, head, tail, last, start;
+
+ head = atomic_read(&phba->rxtable_idx_head);
+ while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
+ /* Table is getting updated */
+ msleep(20);
+ head = atomic_read(&phba->rxtable_idx_head);
+ }
+
+ tail = atomic_xchg(&phba->rxtable_idx_tail, head);
+ if (!phba->rxtable || head == tail) {
+ len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+ "Rxtable is empty\n");
+ goto out;
+ }
+ last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
+ start = tail;
+
+ len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+ " MaxBPI\t Total Data Cmd Total Data Cmpl "
+ " Latency(us) Avg IO Size\tMax IO Size IO cnt "
+ "Info BWutil(ms)\n");
+get_table:
+ for (i = start; i < last; i++) {
+ entry = &phba->rxtable[i];
+ len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+ "%3d:%12lld %12lld\t%12lld\t"
+ "%8lldus\t%8lld\t%10lld "
+ "%8d %2d %2d(%2d)\n",
+ i, entry->max_bytes_per_interval,
+ entry->total_bytes,
+ entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size,
+ entry->max_read_cnt,
+ entry->io_cnt,
+ entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval);
+ }
+
+ if (head != last) {
+ start = 0;
+ last = head;
+ goto get_table;
+ }
+out:
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_rx_monitor_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -5657,6 +5831,23 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.write = lpfc_idiag_extacc_write,
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_cgn_buffer_op
+static const struct file_operations lpfc_cgn_buffer_op = {
+ .owner = THIS_MODULE,
+ .open = lpfc_cgn_buffer_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_cgn_buffer_read,
+ .release = lpfc_cgn_buffer_release,
+};
+
+#undef lpfc_rx_monitor_op
+static const struct file_operations lpfc_rx_monitor_op = {
+ .owner = THIS_MODULE,
+ .open = lpfc_rx_monitor_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_rx_monitor_read,
+ .release = lpfc_rx_monitor_release,
+};
#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
@@ -5907,6 +6098,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* Congestion Info Buffer */
+ scnprintf(name, sizeof(name), "cgn_buffer");
+ phba->debug_cgn_buffer =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_cgn_buffer_op);
+ if (!phba->debug_cgn_buffer) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6527 Cannot create debugfs "
+ "cgn_buffer\n");
+ goto debug_failed;
+ }
+
+ /* RX Monitor */
+ scnprintf(name, sizeof(name), "rx_monitor");
+ phba->debug_rx_monitor =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_rx_monitor_op);
+ if (!phba->debug_rx_monitor) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6528 Cannot create debugfs "
+ "rx_monitor\n");
+ goto debug_failed;
+ }
+
/* RAS log */
snprintf(name, sizeof(name), "ras_log");
phba->debug_ras_log =
@@ -6335,6 +6552,12 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_cgn_buffer);
+ phba->debug_cgn_buffer = NULL;
+
+ debugfs_remove(phba->debug_rx_monitor);
+ phba->debug_rx_monitor = NULL;
+
debugfs_remove(phba->debug_ras_log);
phba->debug_ras_log = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 7ab6d3b08698..a5bf71b34972 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -52,6 +52,9 @@
/* scsistat output buffer size */
#define LPFC_SCSISTAT_SIZE 8192
+/* Congestion Info Buffer size */
+#define LPFC_CGN_BUF_SIZE 8192
+
#define LPFC_DEBUG_OUT_LINE_SZ 80
/*
@@ -279,6 +282,12 @@ struct lpfc_idiag {
void *ptr_private;
};
+#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
+struct lpfc_rx_monitor_debug {
+ char *i_private;
+ char *buffer;
+};
+
#else
#define lpfc_nvmeio_data(phba, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 131374a61d7e..871b665bd72e 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,10 +78,11 @@ struct lpfc_node_rrqs {
};
enum lpfc_fc4_xpt_flags {
- NLP_WAIT_FOR_UNREG = 0x1,
- SCSI_XPT_REGD = 0x2,
- NVME_XPT_REGD = 0x4,
- NLP_XPT_HAS_HH = 0x8,
+ NLP_XPT_REGD = 0x1,
+ SCSI_XPT_REGD = 0x2,
+ NVME_XPT_REGD = 0x4,
+ NVME_XPT_UNREG_WAIT = 0x8,
+ NLP_XPT_HAS_HH = 0x10
};
struct lpfc_nodelist {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e481f5fe29d7..1254a575fd47 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -56,6 +56,9 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
@@ -1664,6 +1667,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (!new_ndlp || (new_ndlp == ndlp))
return ndlp;
+ /*
+ * Unregister from backend if not done yet. Could have been skipped
+ * due to ADISC
+ */
+ lpfc_nlp_unreg_node(vport, new_ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -2025,9 +2034,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto check_plogi;
- else
+ if (!lpfc_error_lost_link(irsp))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
@@ -2080,7 +2087,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
- check_plogi:
if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
@@ -2607,6 +2613,14 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
+
+ /*
+ * If link is down, clear_la and reg_vpi will be done after
+ * flogi following a link up event
+ */
+ if (!lpfc_is_link_up(phba))
+ return;
+
/* The ADISCs are complete. Doesn't matter if they
* succeeded or failed because the ADISC completion
* routine guarantees to call the state machine and
@@ -2749,12 +2763,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto check_adisc;
- else
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
/* As long as this node is not registered with the SCSI or NVMe
* transport, it is no longer an active node. Otherwise
@@ -2772,7 +2783,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- check_adisc:
/* Check to see if there are more ADISCs to be sent */
if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
@@ -3253,7 +3263,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->un.elsreq64.remoteID);
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
"x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
@@ -3279,6 +3289,9 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case ELS_CMD_SCR:
lpfc_issue_els_scr(vport, cmdiocb->retry);
break;
+ case ELS_CMD_EDC:
+ lpfc_issue_els_edc(vport, cmdiocb->retry);
+ break;
case ELS_CMD_RDF:
cmdiocb->context1 = NULL; /* save ndlp refcnt */
lpfc_issue_els_rdf(vport, cmdiocb->retry);
@@ -3288,6 +3301,11 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
phba->fc_stat.elsRetryExceeded++;
}
+ if (cmd == ELS_CMD_EDC) {
+ /* must be called before checking ulpStatus and returning */
+ lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
+ return;
+ }
if (irsp->ulpStatus) {
/* ELS discovery cmd completes with error */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
@@ -3312,11 +3330,14 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "4677 Fabric RDF Notification Grant Data: "
- "0x%08x\n",
- be32_to_cpu(
- prdf->reg_d1.desc_tags[i]));
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT,
+ "4677 Fabric RDF Notification Grant "
+ "Data: 0x%08x Reg: %x %x\n",
+ be32_to_cpu(
+ prdf->reg_d1.desc_tags[i]),
+ phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
}
out:
@@ -3375,6 +3396,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
if (rc) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0937 %s: Failed to reg fc node, rc %d\n",
__func__, rc);
@@ -3413,7 +3435,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* Keep the ndlp just in case RDF is being sent */
return 0;
}
@@ -3657,28 +3678,15 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
lpfc_enqueue_node(vport, ndlp);
}
- /* RDF ELS is not required on an NPIV VN_Port. */
- if (vport->port_type == LPFC_NPIV_PORT) {
- lpfc_nlp_put(ndlp);
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
return -EACCES;
- }
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
if (!elsiocb)
return -ENOMEM;
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "0939 %s: FC_NODE x%x RPI x%x flag x%x "
- "ste x%x type x%x Not registered\n",
- __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
- ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_type);
- return -ENODEV;
- }
-
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -3695,10 +3703,12 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6444 Xmit RDF to remote NPORT x%x\n",
- ndlp->nlp_DID);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
+ ndlp->nlp_DID, phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
+ phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
@@ -3739,7 +3749,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
/* Send LS_ACC */
if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"1623 Failed to RDF_ACC from x%x for x%x\n",
ndlp->nlp_DID, vport->fc_myDID);
return -EIO;
@@ -3747,7 +3757,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Issue new RDF for reregistering */
if (lpfc_issue_els_rdf(vport, 0)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"2623 Failed to re register RDF for x%x\n",
vport->fc_myDID);
return -EIO;
@@ -3757,6 +3767,448 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_least_capable_settings - helper function for EDC rsp processing
+ * @phba: pointer to lpfc hba data structure.
+ * @pcgd: pointer to congestion detection descriptor in EDC rsp.
+ *
+ * This helper routine determines the least capable setting for
+ * congestion signals and signal frequency (including scale) from the
+ * congestion detection descriptor in the EDC rsp. The routine
+ * sets @phba values in preparation for a set_features mailbox.
+ **/
+static void
+lpfc_least_capable_settings(struct lpfc_hba *phba,
+ struct fc_diag_cg_sig_desc *pcgd)
+{
+ u32 rsp_sig_cap = 0, drv_sig_cap = 0;
+ u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
+ struct lpfc_cgn_info *cp;
+ u32 crc;
+ u16 sig_freq;
+
+ /* Get rsp signal and frequency capabilities. */
+ rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
+ rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
+ rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
+
+ /* If the FPort does not support signals, set FPIN only */
+ if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
+ goto out_no_support;
+
+ /* Apply the xmt scale to the xmt cycle to get the correct frequency.
+ * Adapter default is 100 milliseconds. Convert all xmt cycle values
+ * to milliseconds.
+ */
+ switch (rsp_sig_freq_scale) {
+ case EDC_CG_SIGFREQ_SEC:
+ rsp_sig_freq_cyc *= MSEC_PER_SEC;
+ break;
+ case EDC_CG_SIGFREQ_MSEC:
+ rsp_sig_freq_cyc = 1;
+ break;
+ default:
+ goto out_no_support;
+ }
+
+ /* Convenient shorthand. */
+ drv_sig_cap = phba->cgn_reg_signal;
+
+ /* Choose the least capable frequency. */
+ if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
+ phba->cgn_sig_freq = rsp_sig_freq_cyc;
+
+ /* There should be some common signal support. Settle on the least
+ * capable signal and adjust FPIN values. Initialize defaults to
+ * ease the decision.
+ */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
+ (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
+ drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+ if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
+ }
+ if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ }
+
+ if (!phba->cgn_i)
+ return;
+
+ /* Update signal frequency in congestion info buffer */
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ /* Frequency (in ms) at which Signal Warning / Signal Congestion
+ * Notifications are received by the HBA
+ */
+ sig_freq = phba->cgn_sig_freq;
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+ cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
+ cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+ }
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+ return;
+
+out_no_support:
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+}
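
Worked example: if the driver registered WARN|ALARM but the FPort's EDC response advertises WARN-only at a granularity of 1 second, the routine above settles on WARN-only signalling at 1000 ms and keeps FPIN registered for alarms. The signal half of that decision distills to a small table (sketch only, not the driver function):

    /* Sketch: negotiated signal is the intersection of both capabilities */
    static u8 least_capable_signal_sketch(u8 drv, u8 rsp)
    {
        if (rsp == EDC_CG_SIG_NOTSUPPORTED || drv == EDC_CG_SIG_NOTSUPPORTED)
            return EDC_CG_SIG_NOTSUPPORTED; /* fall back to FPIN only */
        if (rsp == EDC_CG_SIG_WARN_ONLY || drv == EDC_CG_SIG_WARN_ONLY)
            return EDC_CG_SIG_WARN_ONLY;    /* FPIN still covers alarms */
        return EDC_CG_SIG_WARN_ALARM;       /* both ends do warn+alarm */
    }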
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+ FC_LS_TLV_DTAG_INIT);
+
+/**
+ * lpfc_cmpl_els_edc - Completion callback function for EDC
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Exchange
+ * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
+ * notify the FPort of its Congestion and Link Fault capabilities. This
+ * routine parses the FPort's response and decides on the least common
+ * values applicable to both FPort and NPort for Warnings and Alarms that
+ * are communicated via hardware signals.
+ **/
+static void
+lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
+ struct fc_diag_cg_sig_desc *pcgd;
+ struct fc_diag_lnkflt_desc *plnkflt;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ const char *dtag_nm;
+ u32 *pdata, dtag;
+ int desc_cnt = 0, bytes_remain;
+ bool rcv_cap_desc = false;
+ struct lpfc_nodelist *ndlp;
+
+ irsp = &rspiocb->iocb;
+ ndlp = cmdiocb->context1;
+
+ lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+ "EDC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
+ irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+
+ pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ if (!pcmd)
+ goto out;
+
+ pdata = (u32 *)pcmd->virt;
+ if (!pdata)
+ goto out;
+
+ /* Need to clear signal values, send features MB and RDF with FPIN. */
+ if (irsp->ulpStatus)
+ goto out;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ edc_rsp = prsp->virt;
+ if (!edc_rsp)
+ goto out;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4676 Fabric EDC Rsp: "
+ "0x%02x, 0x%08x\n",
+ edc_rsp->acc_hdr.la_cmd,
+ be32_to_cpu(edc_rsp->desc_list_len));
+
+ /*
+ * Payload length in bytes is the response descriptor list
+ * length minus the 12 bytes of Link Service Request
+ * Information descriptor in the reply.
+ */
+ bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
+ sizeof(struct fc_els_lsri_desc);
+ if (bytes_remain <= 0)
+ goto out;
+
+ tlv = edc_rsp->desc;
+
+ /*
+ * cycle through EDC diagnostic descriptors to find the
+ * congestion signaling capability descriptor
+ */
+ while (bytes_remain) {
+ if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6461 Truncated TLV hdr on "
+ "Diagnostic descriptor[%d]\n",
+ desc_cnt);
+ goto out;
+ }
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_FAULT_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_lnkflt_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6462 Truncated Link Fault Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_lnkflt_desc));
+ goto out;
+ }
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(
+ phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4617 Link Fault Desc Data: 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
+ break;
+ case ELS_DTAG_CG_SIGNAL_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_cg_sig_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6463 Truncated Cgn Signal Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_cg_sig_desc));
+ goto out;
+ }
+
+ pcgd = (struct fc_diag_cg_sig_desc *)tlv;
+ lpfc_printf_log(
+ phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4616 CGN Desc Data: 0x%08x 0x%08x "
+ "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
+ be32_to_cpu(pcgd->desc_tag),
+ be32_to_cpu(pcgd->desc_len),
+ be32_to_cpu(pcgd->xmt_signal_capability),
+ be32_to_cpu(pcgd->xmt_signal_frequency.count),
+ be32_to_cpu(pcgd->xmt_signal_frequency.units),
+ be32_to_cpu(pcgd->rcv_signal_capability),
+ be32_to_cpu(pcgd->rcv_signal_frequency.count),
+ be32_to_cpu(pcgd->rcv_signal_frequency.units));
+
+ /* Compare driver and Fport capabilities and choose
+ * least common.
+ */
+ lpfc_least_capable_settings(phba, pcgd);
+ rcv_cap_desc = true;
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4919 unknown Diagnostic "
+ "Descriptor[%d]: tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ }
+
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ desc_cnt++;
+ }
+
+out:
+ if (!rcv_cap_desc) {
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
+ "4202 EDC rsp error - sending RDF "
+ "for FPIN only.\n");
+ }
+
+ lpfc_config_cgn_signal(phba);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(phba->pport);
+ lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+ "EDC Cmpl: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+}
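
The descriptor walk in lpfc_cmpl_els_edc() is the standard ELS TLV idiom: make sure the remaining bytes can hold a TLV header, size-check each descriptor against its expected structure, then advance with fc_tlv_next_desc(). Its skeleton, with the per-tag work factored into a hypothetical handler (the real loop logs and extracts fields inline):

    /* Sketch: the TLV iteration skeleton used above */
    while (bytes_remain >= FC_TLV_DESC_HDR_SZ) {
        u32 dtag = be32_to_cpu(tlv->desc_tag);
        size_t dsz = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);

        if (bytes_remain < dsz)
            break;                      /* truncated descriptor */
        handle_descriptor(dtag, tlv);   /* hypothetical dispatch */
        bytes_remain -= dsz;
        tlv = fc_tlv_next_desc(tlv);
    }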
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+{
+ /* We are assuming cgd was zeroed before calling this routine */
+
+ /* Configure the congestion detection capability */
+ cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
+
+ /* Descriptor len doesn't include the tag or len fields. */
+ cgd->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
+
+ /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+ * xmt_signal_frequency.count already set to 0.
+ * xmt_signal_frequency.units already set to 0.
+ */
+
+ if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+ /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+ * rcv_signal_frequency.count already set to 0.
+ * rcv_signal_frequency.units already set to 0.
+ */
+ phba->cgn_sig_freq = 0;
+ return;
+ }
+ switch (phba->cgn_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
+ break;
+ default:
+ /* rcv_signal_capability left 0 thus no support */
+ break;
+ }
+
+ /* We start negotiation with lpfc_fabric_cgn_frequency; after
+ * the completion we settle on the higher frequency.
+ */
+ cgd->rcv_signal_frequency.count =
+ cpu_to_be16(lpfc_fabric_cgn_frequency);
+ cgd->rcv_signal_frequency.units =
+ cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
+}
+
+/**
+ * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: retry counter for the command iocb.
+ *
+ * This routine issues an ELS EDC to the F-Port Controller to communicate
+ * this N_Port's support of hardware signals in its Congestion
+ * Capabilities Descriptor.
+ *
+ * Note: This routine does not check if one or more signals are
+ * set in the cgn_reg_signal parameter. The caller makes the
+ * decision to enforce cgn_reg_signal as nonzero or zero depending
+ * on the conditions. During Fabric requests, the driver
+ * requires cgn_reg_signal to be nonzero. But a dynamic request
+ * to set the congestion mode to OFF from Monitor or Manage
+ * would correctly issue an EDC with no signals enabled to
+ * turn off switch functionality and then update the FW.
+ *
+ * Return code
+ * 0 - Successfully issued edc command
+ * !0 - Failed to issue edc command (negative errno or RDF status)
+ **/
+int
+lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_els_edc_req *edc_req;
+ struct fc_diag_cg_sig_desc *cgn_desc;
+ u16 cmdsize;
+ struct lpfc_nodelist *ndlp;
+ u8 *pcmd = NULL;
+ u32 edc_req_size, cgn_desc_size;
+ int rc;
+
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENODEV;
+
+ /* If HBA doesn't support signals, drop into RDF */
+ if (!phba->cgn_init_reg_signal)
+ goto try_rdf;
+
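+ /* Request payload is the fixed EDC header immediately followed
+ * by a single congestion signaling capability descriptor.
+ */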
+ edc_req_size = sizeof(struct fc_els_edc);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ cmdsize = edc_req_size + cgn_desc_size;
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_EDC);
+ if (!elsiocb)
+ goto try_rdf;
+
+ /* Configure the payload for the supported Diagnostics capabilities. */
+ pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(pcmd, 0, cmdsize);
+ edc_req = (struct lpfc_els_edc_req *)pcmd;
+ edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
+ edc_req->edc.edc_cmd = ELS_EDC;
+
+ cgn_desc = &edc_req->cgn_desc;
+
+ lpfc_format_edc_cgn_desc(phba, cgn_desc);
+
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
+ "4623 Xmit EDC to remote "
+ "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
+ ndlp->nlp_DID, phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue EDC: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto try_rdf;
+ }
+ return 0;
+try_rdf:
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ rc = lpfc_issue_els_rdf(vport, 0);
+ return rc;
+}
+
+/**
* lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
* @vport: pointer to a host virtual N_Port data structure.
* @nlp: pointer to a node-list data structure.
@@ -4378,7 +4830,7 @@ out_retry:
(cmd == ELS_CMD_NVMEPRLI))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
- else
+ else if (cmd != ELS_CMD_ADISC)
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_NPR_NODE);
ndlp->nlp_last_elscmd = cmd;
@@ -4520,7 +4972,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O job is complete. Clear the context1 data. */
+ /* The I/O iocb is complete. Clear the context1 data. */
elsiocb->context1 = NULL;
/* context2 = cmd, context2->next = rsp, context3 = bpl */
@@ -4612,6 +5064,15 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+
+ /* If PLOGI is being retried, PLOGI completion will cleanup the
+ * node. The NLP_NPR_2B_DISC flag needs to be retained to make
+ * progress on nodes discovered from last RSCN.
+ */
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+ goto out;
+
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
@@ -5158,6 +5619,86 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
return 0;
}
+/**
+ * lpfc_issue_els_edc_rsp - Send an EDC ACC response to an EDC request.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: NPort to where rsp is directed
+ *
+ * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
+ * this N_Port's support of hardware signals in its Congestion
+ * Capabilities Descriptor.
+ *
+ * Return code
+ * 0 - Successfully issued edc rsp command
+ * 1 - Failed to issue edc rsp command
+ **/
+static int
+lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_els_edc_rsp *edc_rsp;
+ struct lpfc_iocbq *elsiocb;
+ IOCB_t *icmd, *cmd;
+ uint8_t *pcmd;
+ int cmdsize, rc;
+
+ cmdsize = sizeof(struct lpfc_els_edc_rsp);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ cmd = &cmdiocb->iocb;
+ icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(pcmd, 0, cmdsize);
+
+ edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
+ edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
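+ /* desc_list_len excludes the ACC command word and the length
+ * field itself, per the FC TLV length convention.
+ */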
+ edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
+ edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
+ edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
+ lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
+
+ return 0;
+}
+
/**
* lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
* @vport: pointer to a virtual N_Port data structure.
@@ -5657,25 +6198,40 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(&ndlp->lock);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- sentadisc++;
- vport->num_disc_nodes++;
- if (vport->num_disc_nodes >=
- vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- break;
- }
+
+ if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
+ continue;
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(&ndlp->lock);
+
+ if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ /* This node was marked for ADISC but was not picked
+ * for discovery. This is possible if the node was
+ * missing in gidft response.
+ *
+ * At time of marking node for ADISC, we skipped unreg
+ * from backend
+ */
+ lpfc_nlp_unreg_node(vport, ndlp);
+ continue;
+ }
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ sentadisc++;
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ break;
}
+
}
if (sentadisc == 0) {
spin_lock_irq(shost->host_lock);
@@ -6087,6 +6643,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
case LPFC_LINK_SPEED_64GHZ:
rdp_speed = RDP_PS_64GB;
break;
+ case LPFC_LINK_SPEED_128GHZ:
+ rdp_speed = RDP_PS_128GB;
+ break;
+ case LPFC_LINK_SPEED_256GHZ:
+ rdp_speed = RDP_PS_256GB;
+ break;
default:
rdp_speed = RDP_PS_UNKNOWN;
break;
@@ -6094,6 +6656,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+ if (phba->lmt & LMT_256Gb)
+ rdp_cap |= RDP_PS_256GB;
if (phba->lmt & LMT_128Gb)
rdp_cap |= RDP_PS_128GB;
if (phba->lmt & LMT_64Gb)
@@ -6886,13 +7450,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- /* Check to see if we need to NVME rescan this target
- * remoteport.
- */
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
- ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
- lpfc_nvme_rescan_port(vport, ndlp);
-
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -8212,6 +8769,125 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully processed edc iocb (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
+ uint8_t *payload;
+ uint32_t *ptr, dtag;
+ const char *dtag_nm;
+ int desc_cnt = 0, bytes_remain;
+ bool rcv_cap_desc = false;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+
+ edc_req = (struct fc_els_edc *)payload;
+ bytes_remain = be32_to_cpu(edc_req->desc_len);
+
+ ptr = (uint32_t *)payload;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
+ bytes_remain, be32_to_cpu(*ptr),
+ be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
+
+ /* No signal support unless there is a congestion descriptor */
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+
+ if (bytes_remain <= 0)
+ goto out;
+
+ tlv = edc_req->desc;
+
+ /*
+ * cycle through EDC diagnostic descriptors to find the
+ * congestion signaling capability descriptor; stop at the
+ * first one found
+ */
+ while (bytes_remain && !rcv_cap_desc) {
+ if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6464 Truncated TLV hdr on "
+ "Diagnostic descriptor[%d]\n",
+ desc_cnt);
+ goto out;
+ }
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_FAULT_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_lnkflt_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6465 Truncated Link Fault Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
sizeof(struct fc_diag_lnkflt_desc));
+ goto out;
+ }
+ /* No action for Link Fault descriptor for now */
+ break;
+ case ELS_DTAG_CG_SIGNAL_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_cg_sig_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6466 Truncated cgn signal Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_cg_sig_desc));
+ goto out;
+ }
+
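+ /* A congestion capability descriptor is present; restore the
+ * driver's initial registration preferences before negotiating.
+ */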
+ phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+
+ /* We start negotiation with lpfc_fabric_cgn_frequency.
+ * When we process the EDC, we will settle on the
+ * higher frequency.
+ */
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+ lpfc_least_capable_settings(
+ phba, (struct fc_diag_cg_sig_desc *)tlv);
+ rcv_cap_desc = true;
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6467 unknown Diagnostic "
+ "Descriptor[%d]: tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ }
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ desc_cnt++;
+ }
+out:
+ /* Need to send back an ACC */
+ lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
+
+ lpfc_config_cgn_signal(phba);
+ return 0;
+}
+
+/**
* lpfc_els_timeout - Handler function to the els timer
* @t: timer context used to obtain the vport.
*
@@ -8668,50 +9344,304 @@ lpfc_send_els_event(struct lpfc_vport *vport,
}
-DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
- FC_LS_TLV_DTAG_INIT);
-
DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
FC_FPIN_LI_EVT_TYPES_INIT);
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
+ FC_FPIN_DELI_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
+ FC_FPIN_CONGN_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
+ fc_fpin_congn_severity_types,
+ FC_FPIN_CONGN_SEVERITY_INIT);
+
+
+/**
+ * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
+ * @phba: Pointer to phba object.
+ * @wwnlist: Pointer to list of WWPNs in FPIN payload
+ * @cnt: count of WWPNs in FPIN payload
+ *
+ * This routine is called while processing LI and PC descriptors.
+ * Output is limited to 6 log messages, with 6 WWPNs per message.
+ */
+static void
+lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
+{
+ char buf[LPFC_FPIN_WWPN_LINE_SZ];
+ __be64 wwn;
+ u64 wwpn;
+ int i, len;
+ int line = 0;
+ int wcnt = 0;
+ bool endit = false;
+
+ len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
+ for (i = 0; i < cnt; i++) {
+ /* Are we on the last WWPN */
+ if (i == (cnt - 1))
+ endit = true;
+
+ /* Extract the next WWPN from the payload */
+ wwn = *wwnlist++;
+ wwpn = be64_to_cpu(wwn);
+ len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+ " %016llx", wwpn);
+
+ /* Log a message if we are on the last WWPN
+ * or if we hit the max allowed per message.
+ */
+ wcnt++;
+ if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
+ buf[len] = 0;
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4686 %s\n", buf);
+
+ /* Check if we reached the last WWPN */
+ if (endit)
+ return;
+
+ /* Limit the number of log message displayed per FPIN */
+ line++;
+ if (line == LPFC_FPIN_WWPN_NUM_LINE) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4687 %d WWPNs Truncated\n",
+ cnt - i - 1);
+ return;
+ }
+
+ /* Start over with next log message */
+ wcnt = 0;
+ len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
+ "Additional WWPNs:");
+ }
+ }
+}
+
/**
* lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
- * @vport: Pointer to vport object.
+ * @phba: Pointer to phba object.
* @tlv: Pointer to the Link Integrity Notification Descriptor.
*
- * This function processes a link integrity FPIN event by
- * logging a message
+ * This function processes a Link Integrity FPIN event by logging a message.
**/
static void
-lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
+lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
const char *li_evt_str;
- u32 li_evt;
+ u32 li_evt, cnt;
li_evt = be16_to_cpu(li->event_type);
li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
+ cnt = be32_to_cpu(li->pname_count);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "4680 FPIN Link Integrity %s (x%x) "
- "Detecting PN x%016llx Attached PN x%016llx "
- "Duration %d mSecs Count %d Port Cnt %d\n",
- li_evt_str, li_evt,
- be64_to_cpu(li->detecting_wwpn),
- be64_to_cpu(li->attached_wwpn),
- be32_to_cpu(li->event_threshold),
- be32_to_cpu(li->event_count),
- be32_to_cpu(li->pname_count));
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4680 FPIN Link Integrity %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Duration %d mSecs Count %d Port Cnt %d\n",
+ li_evt_str, li_evt,
+ be64_to_cpu(li->detecting_wwpn),
+ be64_to_cpu(li->attached_wwpn),
+ be32_to_cpu(li->event_threshold),
+ be32_to_cpu(li->event_count), cnt);
+
+ lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Delivery Notification Descriptor TLV
+ *
+ * This function processes a Delivery FPIN event by logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
+ const char *del_rsn_str;
+ u32 del_rsn;
+ __be32 *frame;
+
+ del_rsn = be16_to_cpu(del->deli_reason_code);
+ del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
+
+ /* Skip over desc_tag/desc_len header to payload */
+ frame = (__be32 *)(del + 1);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4681 FPIN Delivery %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "DiscHdr0 x%08x "
+ "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
+ "DiscHdr4 x%08x DiscHdr5 x%08x\n",
+ del_rsn_str, del_rsn,
+ be64_to_cpu(del->detecting_wwpn),
+ be64_to_cpu(del->attached_wwpn),
+ be32_to_cpu(frame[0]),
+ be32_to_cpu(frame[1]),
+ be32_to_cpu(frame[2]),
+ be32_to_cpu(frame[3]),
+ be32_to_cpu(frame[4]),
+ be32_to_cpu(frame[5]));
}
+/**
+ * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
+ *
+ * This function processes a Peer Congestion FPIN event by logging a message.
+ **/
static void
-lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
- u32 fpin_length)
+lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
- struct fc_tlv_desc *tlv;
+ struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
+ const char *pc_evt_str;
+ u32 pc_evt, cnt;
+
+ pc_evt = be16_to_cpu(pc->event_type);
+ pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
+ cnt = be32_to_cpu(pc->pname_count);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
+ "4684 FPIN Peer Congestion %s (x%x) "
+ "Duration %d mSecs "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Impacted Port Cnt %d\n",
+ pc_evt_str, pc_evt,
+ be32_to_cpu(pc->event_period),
+ be64_to_cpu(pc->detecting_wwpn),
+ be64_to_cpu(pc->attached_wwpn),
+ cnt);
+
+ lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Congestion Notification Descriptor TLV
+ *
+ * This function processes an FPIN Congestion Notification. The notification
+ * could be an Alarm or a Warning. This routine feeds that data into the
+ * driver's running congestion algorithm. It also processes the FPIN by
+ * logging a message. It returns 1 if the message should be delivered to
+ * the upper layer, or 0 if it should not.
+ **/
+static int
+lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct lpfc_cgn_info *cp;
+ struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
+ const char *cgn_evt_str;
+ u32 cgn_evt;
+ const char *cgn_sev_str;
+ u32 cgn_sev;
+ uint16_t value;
+ u32 crc;
+ bool nm_log = false;
+ int rc = 1;
+
+ cgn_evt = be16_to_cpu(cgn->event_type);
+ cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
+ cgn_sev = cgn->severity;
+ cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
+
+ /* The driver only takes action on a Credit Stall or Oversubscription
+ * event type to engage the IO algorithm. The driver prints an
+ * unmaskable message only for Lost Credit and Credit Stall.
+ * TODO: Still need to have definition of host action on clear,
+ * lost credit and device specific event types.
+ */
+ switch (cgn_evt) {
+ case FPIN_CONGN_LOST_CREDIT:
+ nm_log = true;
+ break;
+ case FPIN_CONGN_CREDIT_STALL:
+ nm_log = true;
+ fallthrough;
+ case FPIN_CONGN_OVERSUBSCRIPTION:
+ if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
+ nm_log = false;
+ switch (cgn_sev) {
+ case FPIN_CONGN_SEVERITY_ERROR:
+ /* Take action here for an Alarm event */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
+ /* Track of alarm cnt for cgn_info */
+ atomic_inc(&phba->cgn_fabric_alarm_cnt);
+ /* Track of alarm cnt for SYNC_WQE */
+ atomic_inc(&phba->cgn_sync_alarm_cnt);
+ }
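+ /* Alarm and Warning share the frequency bookkeeping;
+ * jump into the Warning case's cleanup label below.
+ */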
+ goto cleanup;
+ }
+ break;
+ case FPIN_CONGN_SEVERITY_WARNING:
+ /* Take action here for a Warning event */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
+ /* Track of warning cnt for cgn_info */
+ atomic_inc(&phba->cgn_fabric_warn_cnt);
+ /* Track of warning cnt for SYNC_WQE */
+ atomic_inc(&phba->cgn_sync_warn_cnt);
+ }
+cleanup:
+ /* Save frequency in ms */
+ phba->cgn_fpin_frequency =
+ be32_to_cpu(cgn->event_period);
+ value = phba->cgn_fpin_frequency;
+ if (phba->cgn_i) {
+ cp = (struct lpfc_cgn_info *)
+ phba->cgn_i->virt;
+ if (phba->cgn_reg_fpin &
+ LPFC_CGN_FPIN_ALARM)
+ cp->cgn_alarm_freq =
+ cpu_to_le16(value);
+ if (phba->cgn_reg_fpin &
+ LPFC_CGN_FPIN_WARN)
+ cp->cgn_warn_freq =
+ cpu_to_le16(value);
+ crc = lpfc_cgn_calc_crc32
+ (cp,
+ LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+ }
+
+ /* Don't deliver to upper layer since
+ * driver took action on this tlv.
+ */
+ rc = 0;
+ }
+ break;
+ }
+ break;
+ }
+
+ /* Lost Credit and Credit Stall events are logged at an unmaskable (warning) level. */
+ lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
+ LOG_CGN_MGMT | LOG_ELS,
+ "4683 FPIN CONGESTION %s type %s (x%x) Event "
+ "Duration %d mSecs\n",
+ cgn_sev_str, cgn_evt_str, cgn_evt,
+ be32_to_cpu(cgn->event_period));
+ return rc;
+}
+
+void
+lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
+ struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
const char *dtag_nm;
- uint32_t desc_cnt = 0, bytes_remain;
- u32 dtag;
+ int desc_cnt = 0, bytes_remain, cnt;
+ u32 dtag, deliver = 0;
+ int len;
/* FPINs handled only if we are in the right discovery state */
if (vport->port_state < LPFC_DISC_AUTH)
@@ -8721,35 +9651,92 @@ lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
if (fpin_length < sizeof(struct fc_els_fpin))
return;
+ /* Sanity check descriptor length. The desc_len value does not
+ * include space for the ELS command and the desc_len fields.
+ */
+ len = be32_to_cpu(fpin->desc_len);
+ if (fpin_length < len + sizeof(struct fc_els_fpin)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4671 Bad ELS FPIN length %d: %d\n",
+ len, fpin_length);
+ return;
+ }
+
tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ first_tlv = tlv;
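+ /* first_tlv marks the head of the descriptor list; each
+ * descriptor is copied here in turn for single-descriptor
+ * delivery to the upper layer.
+ */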
bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
- /* process each descriptor */
+ /* process each descriptor separately */
while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
-
dtag = be32_to_cpu(tlv->desc_tag);
switch (dtag) {
case ELS_DTAG_LNK_INTEGRITY:
- lpfc_els_rcv_fpin_li(vport, tlv);
+ lpfc_els_rcv_fpin_li(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_DELIVERY:
+ lpfc_els_rcv_fpin_del(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_CONGESTION:
+ deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "4678 skipped FPIN descriptor[%d]: "
- "tag x%x (%s)\n",
- desc_cnt, dtag, dtag_nm);
- break;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4678 unknown FPIN descriptor[%d]: "
+ "tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+
+ /* If descriptor is bad, drop the rest of the data */
+ return;
}
+ lpfc_cgn_update_stat(phba, dtag);
+ cnt = be32_to_cpu(tlv->desc_len);
- desc_cnt++;
+ /* Sanity check descriptor length. The desc_len value does not
+ * include space for the desc_tag and the desc_len fields.
+ */
+ len -= (cnt + sizeof(struct fc_tlv_desc));
+ if (len < 0) {
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4672 Bad FPIN descriptor TLV length "
+ "%d: %d %d %s\n",
+ cnt, len, fpin_length, dtag_nm);
+ return;
+ }
+
+ current_tlv = tlv;
bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
tlv = fc_tlv_next_desc(tlv);
- }
- fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
- (char *)fpin);
+ /* Format payload such that the FPIN delivered to the
+ * upper layer is a single descriptor FPIN.
+ */
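+ /* cnt is the TLV payload length, so adding
+ * sizeof(struct fc_els_fpin) (8 bytes, the same size as a
+ * TLV header) also copies the descriptor's tag and length.
+ */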
+ if (desc_cnt)
+ memcpy(first_tlv, current_tlv,
+ (cnt + sizeof(struct fc_els_fpin)));
+
+ /* Adjust the length so that it only reflects a
+ * single descriptor FPIN.
+ */
+ fpin_length = cnt + sizeof(struct fc_els_fpin);
+ fpin->desc_len = cpu_to_be32(fpin_length);
+ fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
+
+ /* Send every descriptor individually to the upper layer */
+ if (deliver)
+ fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
+ fpin_length, (char *)fpin);
+ desc_cnt++;
+ }
}
/**
@@ -8948,6 +9935,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_PRLO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -9137,6 +10127,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* There are no replies, so no rjt codes */
break;
+ case ELS_CMD_EDC:
+ lpfc_els_rcv_edc(vport, elsiocb, ndlp);
+ break;
case ELS_CMD_RDF:
phba->fc_stat.elsRcvRDF++;
/* Accept RDF only from fabric controller */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cc5920979f8..7195ca0275f9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
case LPFC_LINK_SPEED_32GHZ:
case LPFC_LINK_SPEED_64GHZ:
case LPFC_LINK_SPEED_128GHZ:
+ case LPFC_LINK_SPEED_256GHZ:
break;
default:
phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3646,6 +3647,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->wait_4_mlo_maint_flg);
}
lpfc_mbx_process_link_up(phba, la);
+
+ if (phba->cmf_active_mode != LPFC_CFG_OFF)
+ lpfc_cmf_signal_init(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -4208,6 +4213,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
+ int rc;
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
@@ -4283,9 +4289,23 @@ out:
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, 0);
- if (!phba->cfg_enable_mi ||
- phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
+ /* Link was bounced or a Fabric LOGO occurred. Start EDC
+ * with initial FW values provided the congestion mode is
+ * not off. Note that signals may or may not be supported
+ * by the adapter, but FPIN is registered by default when
+ * one or both signal types are unsupported.
+ */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+ rc = lpfc_issue_els_edc(vport, 0);
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_ELS | LOG_DISCOVERY,
+ "4220 EDC issue error x%x, Data: x%x\n",
+ rc, phba->cgn_init_reg_signal);
+ } else {
lpfc_issue_els_rdf(vport, 0);
+ }
}
vport->fc_ns_retry = 0;
@@ -4501,10 +4521,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
spin_unlock_irqrestore(shost->host_lock, iflags);
}
+/* Register a node with backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ /* Already registered with backend, trigger rescan */
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+ lpfc_nvme_rescan_port(vport, ndlp);
+ }
+ return;
+ }
+
+ ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (lpfc_valid_xpt_node(ndlp)) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices.
+ */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+
+ /* We are done if we do not have any NVME remote node */
+ if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+ return;
+
+ /* Notify the NVME transport of this new rport. */
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->nvmet_support == 0) {
+ /* Register this rport with the transport.
+ * Only NVME Target Rports are registered with
+ * the transport.
+ */
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
+ } else {
+ /* Just take an NDLP ref count since the
+ * target does not register rports.
+ */
+ lpfc_nlp_get(ndlp);
+ }
+ }
+}
+
+/* Unregister a node with backend if not already done */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ return;
+ }
+
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->rport &&
+ ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ }
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+}
+
+/*
+ * Adisc state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int new_state)
+{
+ switch (new_state) {
+ /*
+ * Any state to ADISC_ISSUE
+ * Do nothing, adisc cmpl handling will trigger state changes
+ */
+ case NLP_STE_ADISC_ISSUE:
+ break;
+
+ /*
+ * ADISC_ISSUE to mapped states
+ * Trigger a registration with backend, it will be nop if
+ * already registered
+ */
+ case NLP_STE_UNMAPPED_NODE:
+ ndlp->nlp_type |= NLP_FC_NODE;
+ fallthrough;
+ case NLP_STE_MAPPED_NODE:
+ ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ lpfc_nlp_reg_node(vport, ndlp);
+ break;
+
+ /*
+ * ADISC_ISSUE to non-mapped states
+ * We are moving from ADISC_ISSUE to a non-mapped state because
+ * ADISC failed, we would have skipped unregistering with
+ * backend, attempt it now
+ */
+ case NLP_STE_NPR_NODE:
+ ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ fallthrough;
+ default:
+ lpfc_nlp_unreg_node(vport, ndlp);
+ break;
+ }
+
+}
+
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
+ /* Trap ADISC changes here */
+ if (new_state == NLP_STE_ADISC_ISSUE ||
+ old_state == NLP_STE_ADISC_ISSUE) {
+ lpfc_handle_adisc_state(vport, ndlp, new_state);
+ return;
+ }
+
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
@@ -4514,60 +4676,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_NPR_NODE)
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
- /* FCP and NVME Transport interface */
+ /* Reg/Unreg for FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- if (ndlp->rport &&
- lpfc_valid_xpt_node(ndlp)) {
- vport->phba->nport_event_cnt++;
- lpfc_unregister_remote_port(ndlp);
- }
-
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0) {
- /* Start devloss if target. */
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- lpfc_nvme_unregister_port(vport, ndlp);
- } else {
- /* NVMET has no upcall. */
- lpfc_nlp_put(ndlp);
- }
- }
+ /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ lpfc_nlp_unreg_node(vport, ndlp);
}
- /* FCP and NVME Transport interfaces */
-
if (new_state == NLP_STE_MAPPED_NODE ||
- new_state == NLP_STE_UNMAPPED_NODE) {
- if (lpfc_valid_xpt_node(ndlp)) {
- vport->phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- */
- lpfc_register_remote_port(vport, ndlp);
- }
- /* Notify the NVME transport of this new rport. */
- if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
- ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- if (vport->phba->nvmet_support == 0) {
- /* Register this rport with the transport.
- * Only NVME Target Rports are registered with
- * the transport.
- */
- if (ndlp->nlp_type & NLP_NVME_TARGET) {
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
- }
- } else {
- /* Just take an NDLP ref count since the
- * target does not register rports.
- */
- lpfc_nlp_get(ndlp);
- }
- }
- }
+ new_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nlp_reg_node(vport, ndlp);
if ((new_state == NLP_STE_MAPPED_NODE) &&
(vport->stat_data_enabled)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4a5a85ed42ec..634f8fff7425 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -608,6 +608,7 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
#define ELS_CMD_FPIN 0x16000000
+#define ELS_CMD_EDC 0x17000000
#define ELS_CMD_QFPA 0xB0000000
#define ELS_CMD_UVEM 0xB1000000
#else /* __LITTLE_ENDIAN_BITFIELD */
@@ -652,6 +653,7 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
#define ELS_CMD_FPIN ELS_FPIN
+#define ELS_CMD_EDC ELS_EDC
#define ELS_CMD_QFPA 0xB0
#define ELS_CMD_UVEM 0xB1
#endif
@@ -1694,6 +1696,7 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400
+#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index eb8c735a243b..79a4872c2edb 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -20,6 +20,7 @@
* included with this package. *
*******************************************************************/
+#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
/* Macros to deal with bit fields. Each bit field must have 3 #defines
@@ -94,6 +95,9 @@ struct lpfc_sli_intf {
#define LPFC_SLI_INTF_FAMILY_BE3 0x1
#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
+#define LPFC_SLI_INTF_FAMILY_G6 0xc
+#define LPFC_SLI_INTF_FAMILY_G7 0xd
+#define LPFC_SLI_INTF_FAMILY_G7P 0xe
#define lpfc_sli_intf_slirev_SHIFT 4
#define lpfc_sli_intf_slirev_MASK 0x0000000F
#define lpfc_sli_intf_slirev_WORD word0
@@ -393,6 +397,12 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
#define lpfc_wcqe_c_ersp0_WORD word0
uint32_t total_data_placed;
+#define lpfc_wcqe_c_cmf_cg_SHIFT 31
+#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001
+#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed
+#define lpfc_wcqe_c_cmf_bw_SHIFT 0
+#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF
+#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed
uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
@@ -687,6 +697,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_MASK 0xfff
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
+#define LPFC_SEC_TO_MSEC 1000
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -959,6 +970,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_add_status_SHIFT 8
#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
#define lpfc_mbox_hdr_add_status_WORD word7
+#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2
+#define lpfc_mbox_hdr_add_status_2_SHIFT 16
+#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF
+#define lpfc_mbox_hdr_add_status_2_WORD word7
+#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01
+#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02
uint32_t response_length;
uint32_t actual_response_length;
} response;
@@ -1015,6 +1032,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
+#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
@@ -1123,6 +1141,12 @@ struct lpfc_mbx_sge {
uint32_t length;
};
+struct lpfc_mbx_host_buf {
+ uint32_t length;
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+};
+
struct lpfc_mbx_nembed_cmd {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
@@ -1133,6 +1157,31 @@ struct lpfc_mbx_nembed_sge_virt {
void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
};
+#define LPFC_MBX_OBJECT_NAME_LEN_DW 26
+struct lpfc_mbx_read_object { /* Version 0 */
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rd_object_rlen_SHIFT 0
+#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
+#define lpfc_mbx_rd_object_rlen_WORD word0
+ uint32_t rd_object_offset;
+ uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */
+ uint32_t rd_object_cnt;
+ struct lpfc_mbx_host_buf rd_object_hbuf[4];
+ } request;
+ struct {
+ uint32_t rd_object_actual_rlen;
+ uint32_t word1;
+#define lpfc_mbx_rd_object_eof_SHIFT 31
+#define lpfc_mbx_rd_object_eof_MASK 0x1
+#define lpfc_mbx_rd_object_eof_WORD word1
+ } response;
+ } u;
+};
+
struct lpfc_mbx_eq_create {
struct mbox_header header;
union {
@@ -1555,7 +1604,7 @@ struct rq_context {
#define lpfc_rq_context_hdr_size_WORD word1
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
-#define lpfc_rq_context_cq_id_MASK 0x000003FF
+#define lpfc_rq_context_cq_id_MASK 0x0000FFFF
#define lpfc_rq_context_cq_id_WORD word2
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
@@ -2328,6 +2377,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0
#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
@@ -2803,6 +2853,12 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
+#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */
+#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
+#define lpfc_mbx_rd_conf_wcs_WORD word1
+#define lpfc_mbx_rd_conf_acs_SHIFT 27 /* alarm signaling */
+#define lpfc_mbx_rd_conf_acs_MASK 0x00000001
+#define lpfc_mbx_rd_conf_acs_WORD word1
uint32_t word2;
#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
@@ -3328,17 +3384,20 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
-#define cfg_pvl_SHIFT 13
-#define cfg_pvl_MASK 0x00000001
-#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
#define cfg_nsler_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
+
+#define cfg_pbde_SHIFT 20
+#define cfg_pbde_MASK 0x00000001
+#define cfg_pbde_WORD word19
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
@@ -3346,12 +3405,13 @@ struct lpfc_sli4_parameters {
#define cfg_max_tow_xri_WORD word20
uint32_t word21;
-#define cfg_mib_bde_cnt_SHIFT 16
-#define cfg_mib_bde_cnt_MASK 0x000000ff
-#define cfg_mib_bde_cnt_WORD word21
#define cfg_mi_ver_SHIFT 0
#define cfg_mi_ver_MASK 0x0000ffff
#define cfg_mi_ver_WORD word21
+#define cfg_cmf_SHIFT 24
+#define cfg_cmf_MASK 0x000000ff
+#define cfg_cmf_WORD word21
+
uint32_t mib_size;
uint32_t word23; /* RESERVED */
@@ -3380,7 +3440,10 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
+#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3395,6 +3458,9 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6
#define lpfc_mbx_set_feature_dd_SHIFT 0
#define lpfc_mbx_set_feature_dd_MASK 0x00000001
#define lpfc_mbx_set_feature_dd_WORD word6
@@ -3404,6 +3470,15 @@ struct lpfc_mbx_set_feature {
#define LPFC_DISABLE_DUAL_DUMP 0
#define LPFC_ENABLE_DUAL_DUMP 1
#define LPFC_QUERY_OP_DUAL_DUMP 2
+#define lpfc_mbx_set_feature_cmf_SHIFT 0
+#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
+#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_mi_SHIFT 0
+#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_mi_WORD word6
+#define lpfc_mbx_set_feature_milunq_SHIFT 16
+#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_milunq_WORD word6
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -3411,16 +3486,51 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UESR_SHIFT 16
#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UESR_WORD word7
+#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7
+ u32 word8;
+#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
+#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
};
#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+#define LPFC_SET_HOST_DATE_TIME 0x4
+
+struct lpfc_mbx_set_host_date_time {
+ uint32_t word6;
+#define lpfc_mbx_set_host_month_WORD word6
+#define lpfc_mbx_set_host_month_SHIFT 16
+#define lpfc_mbx_set_host_month_MASK 0xFF
+#define lpfc_mbx_set_host_day_WORD word6
+#define lpfc_mbx_set_host_day_SHIFT 8
+#define lpfc_mbx_set_host_day_MASK 0xFF
+#define lpfc_mbx_set_host_year_WORD word6
+#define lpfc_mbx_set_host_year_SHIFT 0
+#define lpfc_mbx_set_host_year_MASK 0xFF
+ uint32_t word7;
+#define lpfc_mbx_set_host_hour_WORD word7
+#define lpfc_mbx_set_host_hour_SHIFT 16
+#define lpfc_mbx_set_host_hour_MASK 0xFF
+#define lpfc_mbx_set_host_min_WORD word7
+#define lpfc_mbx_set_host_min_SHIFT 8
+#define lpfc_mbx_set_host_min_MASK 0xFF
+#define lpfc_mbx_set_host_sec_WORD word7
+#define lpfc_mbx_set_host_sec_SHIFT 0
+#define lpfc_mbx_set_host_sec_MASK 0xFF
+};
+
struct lpfc_mbx_set_host_data {
#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
struct mbox_header header;
uint32_t param_id;
uint32_t param_len;
- uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+ union {
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+ struct lpfc_mbx_set_host_date_time tm;
+ } un;
};
struct lpfc_mbx_set_trunk_mode {
@@ -3438,6 +3548,21 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_mbx_reg_congestion_buf {
+ struct mbox_header header;
+ uint32_t word0;
+#define lpfc_mbx_reg_cgn_buf_type_WORD word0
+#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0
+#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF
+#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0
+#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16
+#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF
+ uint32_t word1;
+ uint32_t length;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+};
+
struct lpfc_rscr_desc_generic {
#define LPFC_RSRC_DESC_WSIZE 22
uint32_t desc[LPFC_RSRC_DESC_WSIZE];
@@ -3603,6 +3728,9 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
+#define lpfc_cntl_attr_flash_id_SHIFT 16
+#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
+#define lpfc_cntl_attr_flash_id_WORD word17
uint32_t mbx_da_struct_ver;
uint32_t ep_fw_da_struct_ver;
uint32_t ncsi_ver_str[3];
@@ -3744,6 +3872,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+
#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
struct mbox_header header;
@@ -3760,7 +3889,7 @@ struct lpfc_mbx_wr_object {
#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
#define lpfc_wr_object_write_length_WORD word4
uint32_t write_offset;
- uint32_t object_name[26];
+ uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
uint32_t bde_count;
struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
} request;
@@ -3809,6 +3938,7 @@ struct lpfc_mqe {
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
struct lpfc_mbx_mq_create_ext mq_create_ext;
+ struct lpfc_mbx_read_object read_object;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_modify_eq_delay eq_delay;
struct lpfc_mbx_cq_create cq_create;
@@ -3834,6 +3964,7 @@ struct lpfc_mqe {
struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_set_beacon_config beacon_config;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_reg_congestion_buf reg_congestion_buf;
struct lpfc_mbx_set_link_diag_state link_diag_state;
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
struct lpfc_mbx_run_link_diag_test link_diag_test;
@@ -3888,6 +4019,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_GRP5 0x5
#define LPFC_TRAILER_CODE_FC 0x10
#define LPFC_TRAILER_CODE_SLI 0x11
+#define LPFC_TRAILER_CODE_CMSTAT 0x13
};
struct lpfc_acqe_link {
@@ -4122,6 +4254,19 @@ struct lpfc_acqe_misconfigured_event {
#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05
};
+struct lpfc_acqe_cgn_signal {
+ u32 word0;
+#define lpfc_warn_acqe_SHIFT 0
+#define lpfc_warn_acqe_MASK 0x7FFFFFFF
+#define lpfc_warn_acqe_WORD word0
+#define lpfc_imm_acqe_SHIFT 31
+#define lpfc_imm_acqe_MASK 0x1
+#define lpfc_imm_acqe_WORD word0
+ u32 alarm_cnt;
+ u32 word2;
+ u32 trailer;
+};
+
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
@@ -4134,8 +4279,10 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
+#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
};
/*
@@ -4543,6 +4690,69 @@ struct create_xri_wqe {
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
+struct cmf_sync_wqe {
+ uint32_t rsrvd[3];
+ uint32_t word3;
+#define cmf_sync_interval_SHIFT 0
+#define cmf_sync_interval_MASK 0x00000ffff
+#define cmf_sync_interval_WORD word3
+#define cmf_sync_afpin_SHIFT 16
+#define cmf_sync_afpin_MASK 0x000000001
+#define cmf_sync_afpin_WORD word3
+#define cmf_sync_asig_SHIFT 17
+#define cmf_sync_asig_MASK 0x000000001
+#define cmf_sync_asig_WORD word3
+#define cmf_sync_op_SHIFT 20
+#define cmf_sync_op_MASK 0x00000000f
+#define cmf_sync_op_WORD word3
+#define cmf_sync_ver_SHIFT 24
+#define cmf_sync_ver_MASK 0x0000000ff
+#define cmf_sync_ver_WORD word3
+#define LPFC_CMF_SYNC_VER 1
+ uint32_t event_tag;
+ uint32_t word5;
+#define cmf_sync_wsigmax_SHIFT 0
+#define cmf_sync_wsigmax_MASK 0x00000ffff
+#define cmf_sync_wsigmax_WORD word5
+#define cmf_sync_wsigcnt_SHIFT 16
+#define cmf_sync_wsigcnt_MASK 0x00000ffff
+#define cmf_sync_wsigcnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+#define cmf_sync_cmnd_SHIFT 8
+#define cmf_sync_cmnd_MASK 0x0000000ff
+#define cmf_sync_cmnd_WORD word7
+ uint32_t word8;
+ uint32_t word9;
+#define cmf_sync_reqtag_SHIFT 0
+#define cmf_sync_reqtag_MASK 0x00000ffff
+#define cmf_sync_reqtag_WORD word9
+#define cmf_sync_wfpinmax_SHIFT 16
+#define cmf_sync_wfpinmax_MASK 0x0000000ff
+#define cmf_sync_wfpinmax_WORD word9
+#define cmf_sync_wfpincnt_SHIFT 24
+#define cmf_sync_wfpincnt_MASK 0x0000000ff
+#define cmf_sync_wfpincnt_WORD word9
+ uint32_t word10;
+#define cmf_sync_qosd_SHIFT 9
+#define cmf_sync_qosd_MASK 0x00000001
+#define cmf_sync_qosd_WORD word10
+ uint32_t word11;
+#define cmf_sync_cmd_type_SHIFT 0
+#define cmf_sync_cmd_type_MASK 0x0000000f
+#define cmf_sync_cmd_type_WORD word11
+#define cmf_sync_wqec_SHIFT 7
+#define cmf_sync_wqec_MASK 0x00000001
+#define cmf_sync_wqec_WORD word11
+#define cmf_sync_cqid_SHIFT 16
+#define cmf_sync_cqid_MASK 0x0000ffff
+#define cmf_sync_cqid_WORD word11
+ uint32_t read_bytes;
+ uint32_t word13;
+ uint32_t word14;
+ uint32_t word15;
+};
+
struct abort_cmd_wqe {
uint32_t rsrvd[3];
uint32_t word3;
@@ -4672,6 +4882,7 @@ union lpfc_wqe {
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
struct abort_cmd_wqe abort_cmd;
+ struct cmf_sync_wqe cmf_sync;
struct create_xri_wqe create_xri;
struct xmit_bcast64_wqe xmit_bcast64;
struct xmit_seq64_wqe xmit_sequence;
@@ -4692,6 +4903,7 @@ union lpfc_wqe128 {
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
struct abort_cmd_wqe abort_cmd;
+ struct cmf_sync_wqe cmf_sync;
struct create_xri_wqe create_xri;
struct xmit_bcast64_wqe xmit_bcast64;
struct xmit_seq64_wqe xmit_sequence;
@@ -4707,6 +4919,7 @@ union lpfc_wqe128 {
#define MAGIC_NUMBER_G6 0xFEAA0003
#define MAGIC_NUMBER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G7P 0xFEAA0020
struct lpfc_grp_hdr {
uint32_t size;
@@ -4734,6 +4947,7 @@ struct lpfc_grp_hdr {
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
#define OTHER_COMMAND 0x8
+#define CMF_SYNC_COMMAND 0xA
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
@@ -4755,6 +4969,7 @@ struct lpfc_grp_hdr {
#define CMD_FCP_TRECEIVE64_WQE 0xA1
#define CMD_FCP_TRSP64_WQE 0xA3
#define CMD_GEN_REQUEST64_WQE 0xC2
+#define CMD_CMF_SYNC_WQE 0xE8
#define CMD_WQE_MASK 0xff
@@ -4762,3 +4977,43 @@ struct lpfc_grp_hdr {
#define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2
#define LPFC_DV_RESET 3
+
+/* On some kernels, enum fc_ls_tlv_dtag does not have
+ * these 2 enums defined; on other kernels it does.
+ * To get around this we need to add these 2 defines here.
+ */
+#ifndef ELS_DTAG_LNK_FAULT_CAP
+#define ELS_DTAG_LNK_FAULT_CAP 0x0001000D
+#endif
+#ifndef ELS_DTAG_CG_SIGNAL_CAP
+#define ELS_DTAG_CG_SIGNAL_CAP 0x0001000F
+#endif
+
+/*
+ * Initializer useful for decoding FPIN string table.
+ */
+#define FC_FPIN_CONGN_SEVERITY_INIT { \
+ { FPIN_CONGN_SEVERITY_WARNING, "Warning" }, \
+ { FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
+}
+
+/* EDC supports two descriptors. When allocated, it is the
+ * size of this structure plus each supported descriptor.
+ */
+struct lpfc_els_edc_req {
+ struct fc_els_edc edc; /* hdr up to descriptors */
+ struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
+};
+
+/* Minimum structure defines for the EDC response.
+ * Balance is in buffer.
+ */
+struct lpfc_els_edc_rsp {
+ struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */
+ struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
+};
+
+/* Used for logging FPIN messages */
+#define LPFC_FPIN_WWPN_LINE_SZ 128
+#define LPFC_FPIN_WWPN_LINE_CNT 6
+#define LPFC_FPIN_WWPN_NUM_LINE 6
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index d48414e295a0..6a90e6e53d09 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e29523a1b530..0ec322f0e3cb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -93,6 +93,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
+static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1243,7 +1244,8 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
return;
if (phba->link_state == LPFC_HBA_ERROR ||
- phba->pport->fc_flag & FC_OFFLINE_MODE)
+ phba->pport->fc_flag & FC_OFFLINE_MODE ||
+ phba->cmf_active_mode != LPFC_CFG_OFF)
goto requeue;
for_each_present_cpu(i) {
@@ -1852,6 +1854,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
{
int rc;
uint32_t intr_mode;
+ LPFC_MBOXQ_t *mboxq;
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
@@ -1871,11 +1874,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
"Recovery...\n");
/* If we are no wait, the HBA has been reset and is not
- * functional, thus we should clear LPFC_SLI_ACTIVE flag.
+ * functional, thus we should clear
+ * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
*/
if (mbx_action == LPFC_MBX_NO_WAIT) {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
spin_unlock_irq(&phba->hbalock);
}
@@ -2590,6 +2601,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_LANCER_G7_FC:
m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
break;
+ case PCI_DEVICE_ID_LANCER_G7P_FC:
+ m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
@@ -3008,6 +3022,123 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
}
/**
+ * lpfc_cmf_stop - Stop CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link goes down or if CMF mode is turned OFF.
+ * It is also called when going offline or unloading, just before the
+ * congestion info buffer is unregistered.
+ **/
+void
+lpfc_cmf_stop(struct lpfc_hba *phba)
+{
+ int cpu;
+ struct lpfc_cgn_stat *cgs;
+
+ /* We only do something if CMF is enabled */
+ if (!phba->sli4_hba.pc_sli4_params.cmf)
+ return;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6221 Stop CMF / Cancel Timer\n");
+
+ /* Cancel the CMF timer */
+ hrtimer_cancel(&phba->cmf_timer);
+
+ /* Zero CMF counters */
+ atomic_set(&phba->cmf_busy, 0);
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ atomic64_set(&cgs->total_bytes, 0);
+ atomic64_set(&cgs->rcv_bytes, 0);
+ atomic_set(&cgs->rx_io_cnt, 0);
+ atomic64_set(&cgs->rx_latency, 0);
+ }
+ atomic_set(&phba->cmf_bw_wait, 0);
+
+ /* Resume any blocked IO - Queue unblock on workqueue */
+ queue_work(phba->wq, &phba->unblock_request_work);
+}
+
+static inline uint64_t
+lpfc_get_max_line_rate(struct lpfc_hba *phba)
+{
+ uint64_t rate = lpfc_sli_port_speed_get(phba);
+
+ return ((((unsigned long)rate) * 1024 * 1024) / 10);
+}
+
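+/* Worked example for the conversion above, assuming (illustration
+ * only) that lpfc_sli_port_speed_get() reports link speed in megabits
+ * per second, e.g. 32000 for a 32GFC link:
+ *
+ *   32000 * 1024 * 1024 = 33,554,432,000 line bits/sec
+ *   / 10 (~10 line bits per encoded data byte) = 3,355,443,200 bytes/sec
+ *
+ * lpfc_cmf_signal_init() below then scales this to a byte budget per
+ * timer interval as bytes/sec * LPFC_CMF_INTERVAL / 1000, i.e. about
+ * 3.02e8 bytes if the interval is 90 ms.
+ */
+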
+void
+lpfc_cmf_signal_init(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6223 Signal CMF init\n");
+
+ /* Use the new fc_linkspeed to recalculate */
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+ phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
+ phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+ phba->cmf_interval_rate, 1000);
+ phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
+
+ /* This is a signal to firmware to sync up CMF BW with link speed */
+ lpfc_issue_cmf_sync_wqe(phba, 0, 0);
+}
+
+/**
+ * lpfc_cmf_start - Start CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link comes up or if CMF mode is changed
+ * from OFF to Monitor or Managed.
+ **/
+void
+lpfc_cmf_start(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* We only do something if CMF is enabled */
+ if (!phba->sli4_hba.pc_sli4_params.cmf ||
+ phba->cmf_active_mode == LPFC_CFG_OFF)
+ return;
+
+ /* Reinitialize congestion buffer info */
+ lpfc_init_congestion_buf(phba);
+
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+ atomic_set(&phba->cmf_busy, 0);
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ atomic64_set(&cgs->total_bytes, 0);
+ atomic64_set(&cgs->rcv_bytes, 0);
+ atomic_set(&cgs->rx_io_cnt, 0);
+ atomic64_set(&cgs->rx_latency, 0);
+ }
+ phba->cmf_latency.tv_sec = 0;
+ phba->cmf_latency.tv_nsec = 0;
+
+ lpfc_cmf_signal_init(phba);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6222 Start CMF / Timer\n");
+
+ phba->cmf_timer_cnt = 0;
+ hrtimer_start(&phba->cmf_timer,
+ ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
+ HRTIMER_MODE_REL);
+ /* Setup for latency check in IO cmpl routines */
+ ktime_get_real_ts64(&phba->cmf_latency);
+
+ atomic_set(&phba->cmf_bw_wait, 0);
+ atomic_set(&phba->cmf_stop_io, 0);
+}
+
+/**
* lpfc_stop_hba_timers - Stop all the timers associated with an HBA
* @phba: pointer to lpfc hba data structure.
*
@@ -3541,6 +3672,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
+
+ lpfc_unreg_rpi(vports[i], ndlp);
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
@@ -3556,7 +3689,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
- lpfc_unreg_rpi(vports[i], ndlp);
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
@@ -4666,6 +4798,8 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
if (phba->hba_flag & HBA_FCOE_MODE)
return;
+ if (phba->lmt & LMT_256Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
if (phba->lmt & LMT_128Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
if (phba->lmt & LMT_64Gb)
@@ -4845,7 +4979,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
/**
* lpfc_vmid_poll - VMID timeout detection
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
*
* This routine is invoked when there is no I/O on by a VM for the specified
* amount of time. When this situation is detected, the VMID has to be
@@ -5074,6 +5208,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_FC_LA_SPEED_128G:
port_speed = 128000;
break;
+ case LPFC_FC_LA_SPEED_256G:
+ port_speed = 256000;
+ break;
default:
port_speed = 0;
}
@@ -5267,6 +5404,645 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
return port_speed;
}
+void
+lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
+{
+ struct rxtable_entry *entry;
+ int cnt = 0, head, tail, last, start;
+
+ head = atomic_read(&phba->rxtable_idx_head);
+ tail = atomic_read(&phba->rxtable_idx_tail);
+ if (!phba->rxtable || head == tail) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+ "4411 Rxtable is empty\n");
+ return;
+ }
+ last = tail;
+ start = head;
+
+ /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
+ while (start != last) {
+ if (start)
+ start--;
+ else
+ start = LPFC_MAX_RXMONITOR_ENTRY - 1;
+ entry = &phba->rxtable[start];
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
+ "Lat %lld ASz %lld Info %02d BWUtil %d "
+ "Int %d slot %d\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency, entry->avg_io_size,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval, start);
+ cnt++;
+ if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
+ return;
+ }
+}
+
+/**
+ * lpfc_cgn_update_stat - Save data into congestion stats buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @dtag: FPIN descriptor received
+ *
+ * Increment the FPIN received counter/time when it happens.
+ */
+void
+lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
+{
+ struct lpfc_cgn_info *cp;
+ struct tm broken;
+ struct timespec64 cur_time;
+ u32 cnt;
+ u16 value;
+
+ /* Make sure we have a congestion info buffer */
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+ /* Update congestion statistics */
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ cnt = le32_to_cpu(cp->link_integ_notification);
+ cnt++;
+ cp->link_integ_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_lnk_month = broken.tm_mon + 1;
+ cp->cgn_stat_lnk_day = broken.tm_mday;
+ cp->cgn_stat_lnk_year = broken.tm_year - 100;
+ cp->cgn_stat_lnk_hour = broken.tm_hour;
+ cp->cgn_stat_lnk_min = broken.tm_min;
+ cp->cgn_stat_lnk_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_DELIVERY:
+ cnt = le32_to_cpu(cp->delivery_notification);
+ cnt++;
+ cp->delivery_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_del_month = broken.tm_mon + 1;
+ cp->cgn_stat_del_day = broken.tm_mday;
+ cp->cgn_stat_del_year = broken.tm_year - 100;
+ cp->cgn_stat_del_hour = broken.tm_hour;
+ cp->cgn_stat_del_min = broken.tm_min;
+ cp->cgn_stat_del_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ cnt = le32_to_cpu(cp->cgn_peer_notification);
+ cnt++;
+ cp->cgn_peer_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_peer_month = broken.tm_mon + 1;
+ cp->cgn_stat_peer_day = broken.tm_mday;
+ cp->cgn_stat_peer_year = broken.tm_year - 100;
+ cp->cgn_stat_peer_hour = broken.tm_hour;
+ cp->cgn_stat_peer_min = broken.tm_min;
+ cp->cgn_stat_peer_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_CONGESTION:
+ cnt = le32_to_cpu(cp->cgn_notification);
+ cnt++;
+ cp->cgn_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_cgn_month = broken.tm_mon + 1;
+ cp->cgn_stat_cgn_day = broken.tm_mday;
+ cp->cgn_stat_cgn_year = broken.tm_year - 100;
+ cp->cgn_stat_cgn_hour = broken.tm_hour;
+ cp->cgn_stat_cgn_min = broken.tm_min;
+ cp->cgn_stat_cgn_sec = broken.tm_sec;
+ }
+ if (phba->cgn_fpin_frequency &&
+ phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+ value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+ cp->cgn_stat_npm = cpu_to_le32(value);
+ }
+ value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(value);
+}
+
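+/* Note on the date/time fields recorded above: struct tm months are
+ * 0-based and tm_year counts from 1900, hence the +1 and -100
+ * adjustments. For example, 14 September 2021 is stored as month 9,
+ * day 14, year 21 (years kept relative to 2000, matching the
+ * congestion buffer init code).
+ */
+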
+/**
+ * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Save the congestion event data every minute.
+ * On the hour collapse all the minute data into hour data. Every day
+ * collapse all the hour data into daily data. Separate driver
+ * and fabrc congestion event counters that will be saved out
+ * to the registered congestion buffer every minute.
+ */
+static void
+lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct tm broken;
+ struct timespec64 cur_time;
+ uint32_t i, index;
+ uint16_t value, mvalue;
+ uint64_t bps;
+ uint32_t mbps;
+ uint32_t dvalue, wvalue, lvalue, avalue;
+ uint64_t latsum;
+ uint16_t *ptr;
+ uint32_t *lptr;
+ uint16_t *mptr;
+
+ /* Make sure we have a congestion info buffer */
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ if (time_before(jiffies, phba->cgn_evt_timestamp))
+ return;
+ phba->cgn_evt_timestamp = jiffies +
+ msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+ phba->cgn_evt_minute++;
+
+ /* We should get to this point in the routine on 1 minute intervals */
+
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+ if (phba->cgn_fpin_frequency &&
+ phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+ value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+ cp->cgn_stat_npm = cpu_to_le32(value);
+ }
+
+ /* Read and clear the latency counters for this minute */
+ lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
+ latsum = atomic64_read(&phba->cgn_latency_evt);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+
+ /* We need to store MB/sec bandwidth in the congestion information.
+ * block_cnt is count of 512 byte blocks for the entire minute,
+ * bps will get bytes per sec before finally converting to MB/sec.
+ */
+ bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
+ phba->rx_block_cnt = 0;
+ mvalue = bps / (1024 * 1024); /* convert to MB/sec */
+
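+ /* Worked example of the math above, assuming LPFC_SEC_MIN is 60:
+ * with 123,000,000 512-byte blocks counted over the minute,
+ * bps = 123,000,000 / 60 * 512 = 1,049,600,000 bytes/sec
+ * mvalue = 1,049,600,000 / (1024 * 1024) ~= 1001 MB/sec
+ */
+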
+ /* Every minute */
+ /* cgn parameters */
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+ /* Fill in default LUN qdepth */
+ value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+ cp->cgn_lunq = cpu_to_le16(value);
+
+ /* Record congestion buffer info - every minute
+ * cgn_driver_evt_cnt (Driver events)
+ * cgn_fabric_warn_cnt (Congestion Warnings)
+ * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
+ * cgn_fabric_alarm_cnt (Congestion Alarms)
+ */
+ index = ++cp->cgn_index_minute;
+ if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
+ cp->cgn_index_minute = 0;
+ index = 0;
+ }
+
+ /* Get the number of driver events in this sample and reset counter */
+ dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+
+ /* Get the number of warning events - FPIN and Signal for this minute */
+ wvalue = 0;
+ if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+
+ /* Get the number of alarm events - FPIN and Signal for this minute */
+ avalue = 0;
+ if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+
+ /* Collect the driver, warning, alarm and latency counts for this
+ * minute into the driver congestion buffer.
+ */
+ ptr = &cp->cgn_drvr_min[index];
+ value = (uint16_t)dvalue;
+ *ptr = cpu_to_le16(value);
+
+ ptr = &cp->cgn_warn_min[index];
+ value = (uint16_t)wvalue;
+ *ptr = cpu_to_le16(value);
+
+ ptr = &cp->cgn_alarm_min[index];
+ value = (uint16_t)avalue;
+ *ptr = cpu_to_le16(value);
+
+ lptr = &cp->cgn_latency_min[index];
+ if (lvalue) {
+ lvalue = (uint32_t)div_u64(latsum, lvalue);
+ *lptr = cpu_to_le32(lvalue);
+ } else {
+ *lptr = 0;
+ }
+
+ /* Collect the bandwidth value into the driver's congestion buffer. */
+ mptr = &cp->cgn_bw_min[index];
+ *mptr = cpu_to_le16(mvalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
+ index, dvalue, wvalue, *lptr, mvalue, avalue);
+
+ /* Every hour */
+ if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
+ /* Record congestion buffer info - every hour
+ * Collapse all minutes into an hour
+ */
+ index = ++cp->cgn_index_hour;
+ if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
+ cp->cgn_index_hour = 0;
+ index = 0;
+ }
+
+ dvalue = 0;
+ wvalue = 0;
+ lvalue = 0;
+ avalue = 0;
+ mvalue = 0;
+ mbps = 0;
+ for (i = 0; i < LPFC_MIN_HOUR; i++) {
+ dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
+ wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
+ lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
+ mbps += le16_to_cpu(cp->cgn_bw_min[i]);
+ avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
+ }
+ if (lvalue) /* Avg of latency averages */
+ lvalue /= LPFC_MIN_HOUR;
+ if (mbps) /* Avg of Bandwidth averages */
+ mvalue = mbps / LPFC_MIN_HOUR;
+
+ lptr = &cp->cgn_drvr_hr[index];
+ *lptr = cpu_to_le32(dvalue);
+ lptr = &cp->cgn_warn_hr[index];
+ *lptr = cpu_to_le32(wvalue);
+ lptr = &cp->cgn_latency_hr[index];
+ *lptr = cpu_to_le32(lvalue);
+ mptr = &cp->cgn_bw_hr[index];
+ *mptr = cpu_to_le16(mvalue);
+ lptr = &cp->cgn_alarm_hr[index];
+ *lptr = cpu_to_le32(avalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2419 Congestion Info - hour "
+ "(%d): %d %d %d %d %d\n",
+ index, dvalue, wvalue, lvalue, mvalue, avalue);
+ }
+
+ /* Every day */
+ if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
+ /* Record congestion buffer info - every day
+ * Collapse all hours into a day. Rotate days
+ * after LPFC_MAX_CGN_DAYS.
+ */
+ index = ++cp->cgn_index_day;
+ if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
+ cp->cgn_index_day = 0;
+ index = 0;
+ }
+
+ /* Anytime we overwrite daily index 0, after we wrap,
+ * we will be overwriting the oldest day, so we must
+ * update the congestion data start time for that day.
+ * That start time should have previously been saved after
+ * we wrote the last day's worth of data.
+ */
+ if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
+ time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
+
+ cp->cgn_info_month = broken.tm_mon + 1;
+ cp->cgn_info_day = broken.tm_mday;
+ cp->cgn_info_year = broken.tm_year - 100;
+ cp->cgn_info_hour = broken.tm_hour;
+ cp->cgn_info_minute = broken.tm_min;
+ cp->cgn_info_second = broken.tm_sec;
+
+ lpfc_printf_log
+ (phba, KERN_INFO, LOG_CGN_MGMT,
+ "2646 CGNInfo idx0 Start Time: "
+ "%d/%d/%d %d:%d:%d\n",
+ cp->cgn_info_day, cp->cgn_info_month,
+ cp->cgn_info_year, cp->cgn_info_hour,
+ cp->cgn_info_minute, cp->cgn_info_second);
+ }
+
+ dvalue = 0;
+ wvalue = 0;
+ lvalue = 0;
+ mvalue = 0;
+ mbps = 0;
+ avalue = 0;
+ for (i = 0; i < LPFC_HOUR_DAY; i++) {
+ dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
+ wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
+ lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
+ mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+ avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
+ }
+ if (lvalue) /* Avg of latency averages */
+ lvalue /= LPFC_HOUR_DAY;
+ if (mbps) /* Avg of Bandwidth averages */
+ mvalue = mbps / LPFC_HOUR_DAY;
+
+ lptr = &cp->cgn_drvr_day[index];
+ *lptr = cpu_to_le32(dvalue);
+ lptr = &cp->cgn_warn_day[index];
+ *lptr = cpu_to_le32(wvalue);
+ lptr = &cp->cgn_latency_day[index];
+ *lptr = cpu_to_le32(lvalue);
+ mptr = &cp->cgn_bw_day[index];
+ *mptr = cpu_to_le16(mvalue);
+ lptr = &cp->cgn_alarm_day[index];
+ *lptr = cpu_to_le32(avalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2420 Congestion Info - daily (%d): "
+ "%d %d %d %d %d\n",
+ index, dvalue, wvalue, lvalue, mvalue, avalue);
+
+ /* We just wrote LPFC_MAX_CGN_DAYS of data,
+ * so we are wrapped on any data after this.
+ * Save this as the start time for the next day.
+ */
+ if (index == (LPFC_MAX_CGN_DAYS - 1)) {
+ phba->hba_flag |= HBA_CGN_DAY_WRAP;
+ ktime_get_real_ts64(&phba->cgn_daily_ts);
+ }
+ }
+
+ /* Use the frequency found in the last rcv'ed FPIN */
+ value = phba->cgn_fpin_frequency;
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
+ cp->cgn_alarm_freq = cpu_to_le16(value);
+
+ /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
+ * are received by the HBA
+ */
+ value = phba->cgn_sig_freq;
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ cp->cgn_alarm_freq = cpu_to_le16(value);
+
+ lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(lvalue);
+}
+
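+/* Cadence summary for the routine above (array sizes are inferred from
+ * the wrap constants, so treat the exact counts as assumptions): minute
+ * slots wrap after LPFC_MIN_HOUR samples and are collapsed into an hour
+ * slot; hour slots wrap after LPFC_HOUR_DAY entries and are collapsed
+ * into a day slot, which rotates after LPFC_MAX_CGN_DAYS. Event counts
+ * are summed at each level, while latency and bandwidth carry averages
+ * of averages.
+ */
+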
+/**
+ * lpfc_calc_cmf_latency - latency from start of rx rate timer interval
+ * @phba: The Hba for which this call is being executed.
+ *
+ * The routine calculates the latency from the beginning of the CMF timer
+ * interval to the current point in time. It is called from IO completion
+ * when we exceed our Bandwidth limitation for the time interval.
+ */
+uint32_t
+lpfc_calc_cmf_latency(struct lpfc_hba *phba)
+{
+ struct timespec64 cmpl_time;
+ uint32_t msec = 0;
+
+ ktime_get_real_ts64(&cmpl_time);
+
+ /* This routine works on a ms granularity so sec and usec are
+ * converted accordingly.
+ */
+ if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
+ msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
+ NSEC_PER_MSEC;
+ } else {
+ if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
+ msec = (cmpl_time.tv_sec -
+ phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
+ msec += ((cmpl_time.tv_nsec -
+ phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
+ } else {
+ msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
+ 1) * MSEC_PER_SEC;
+ msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
+ cmpl_time.tv_nsec) / NSEC_PER_MSEC);
+ }
+ }
+ return msec;
+}
+
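+/* Cross-check sketch, not built into the driver: the three-way branch
+ * above is equivalent to a plain timespec64 subtraction truncated to
+ * milliseconds. Kept under #if 0 purely as a minimal reference using
+ * the generic helpers from <linux/time64.h>.
+ */
+#if 0
+static uint32_t
+lpfc_calc_cmf_latency_ref(struct lpfc_hba *phba)
+{
+ struct timespec64 now, delta;
+
+ ktime_get_real_ts64(&now);
+ delta = timespec64_sub(now, phba->cmf_latency);
+ return div_u64(timespec64_to_ns(&delta), NSEC_PER_MSEC);
+}
+#endif
+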
+/**
+ * lpfc_cmf_timer - This is the timer function for one congestion
+ * rate interval.
+ * @timer: Pointer to the high resolution timer that expired
+ */
+static enum hrtimer_restart
+lpfc_cmf_timer(struct hrtimer *timer)
+{
+ struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
+ cmf_timer);
+ struct rxtable_entry *entry;
+ uint32_t io_cnt;
+ uint32_t head, tail;
+ uint32_t busy, max_read;
+ uint64_t total, rcv, lat, mbpi;
+ int timer_interval = LPFC_CMF_INTERVAL;
+ uint32_t ms;
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* Only restart the timer if congestion mgmt is on */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+ !phba->cmf_latency.tv_sec) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6224 CMF timer exit: %d %lld\n",
+ phba->cmf_active_mode,
+ (uint64_t)phba->cmf_latency.tv_sec);
+ return HRTIMER_NORESTART;
+ }
+
+ /* If pport is not ready yet, just exit and wait for
+ * the next timer cycle to hit.
+ */
+ if (!phba->pport)
+ goto skip;
+
+ /* Do not block SCSI IO while in the timer routine since
+ * total_bytes will be cleared
+ */
+ atomic_set(&phba->cmf_stop_io, 1);
+
+ /* First we need to calculate the actual ms between
+ * the last timer interrupt and this one. We ask for
+ * LPFC_CMF_INTERVAL, however the actual time may
+ * vary depending on system overhead.
+ */
+ ms = lpfc_calc_cmf_latency(phba);
+
+ /* Immediately after we calculate the time since the last
+ * timer interrupt, set the start time for the next
+ * interrupt.
+ */
+ ktime_get_real_ts64(&phba->cmf_latency);
+
+ phba->cmf_link_byte_count =
+ div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
+
+ /* Collect all the stats from the prior timer interval */
+ total = 0;
+ io_cnt = 0;
+ lat = 0;
+ rcv = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_xchg(&cgs->total_bytes, 0);
+ io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
+ lat += atomic64_xchg(&cgs->rx_latency, 0);
+ rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
+ }
+
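+ /* Note: draining with atomic64_xchg(..., 0) reads and resets each
+ * per-CPU counter in a single atomic step, so IO completions racing
+ * with this timer on other CPUs are attributed to the next interval
+ * rather than lost.
+ */
+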
+ /* Before we issue another CMF_SYNC_WQE, retrieve the BW
+ * returned from the last CMF_SYNC_WQE issued, from
+ * cmf_last_sync_bw. This will be the target BW for
+ * this next timer interval.
+ */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+ phba->link_state != LPFC_LINK_DOWN &&
+ phba->hba_flag & HBA_SETUP) {
+ mbpi = phba->cmf_last_sync_bw;
+ phba->cmf_last_sync_bw = 0;
+ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
+ } else {
+ /* For Monitor mode or link down we want mbpi
+ * to be the full link speed
+ */
+ mbpi = phba->cmf_link_byte_count;
+ }
+ phba->cmf_timer_cnt++;
+
+ if (io_cnt) {
+ /* Update congestion info buffer latency in us */
+ atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
+ atomic64_add(lat, &phba->cgn_latency_evt);
+ }
+ busy = atomic_xchg(&phba->cmf_busy, 0);
+ max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
+
+ /* Calculate MBPI for the next timer interval */
+ if (mbpi) {
+ if (mbpi > phba->cmf_link_byte_count ||
+ phba->cmf_active_mode == LPFC_CFG_MONITOR)
+ mbpi = phba->cmf_link_byte_count;
+
+ /* Change max_bytes_per_interval to what the prior
+ * CMF_SYNC_WQE cmpl indicated.
+ */
+ if (mbpi != phba->cmf_max_bytes_per_interval)
+ phba->cmf_max_bytes_per_interval = mbpi;
+ }
+
+ /* Save rxmonitor information for debug */
+ if (phba->rxtable) {
+ head = atomic_xchg(&phba->rxtable_idx_head,
+ LPFC_RXMONITOR_TABLE_IN_USE);
+ entry = &phba->rxtable[head];
+ entry->total_bytes = total;
+ entry->rcv_bytes = rcv;
+ entry->cmf_busy = busy;
+ entry->cmf_info = phba->cmf_active_info;
+ if (io_cnt) {
+ entry->avg_io_latency = div_u64(lat, io_cnt);
+ entry->avg_io_size = div_u64(rcv, io_cnt);
+ } else {
+ entry->avg_io_latency = 0;
+ entry->avg_io_size = 0;
+ }
+ entry->max_read_cnt = max_read;
+ entry->io_cnt = io_cnt;
+ entry->max_bytes_per_interval = mbpi;
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ entry->timer_utilization = phba->cmf_last_ts;
+ else
+ entry->timer_utilization = ms;
+ entry->timer_interval = ms;
+ phba->cmf_last_ts = 0;
+
+ /* Increment rxtable index */
+ head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+ tail = atomic_read(&phba->rxtable_idx_tail);
+ if (head == tail) {
+ tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+ atomic_set(&phba->rxtable_idx_tail, tail);
+ }
+ atomic_set(&phba->rxtable_idx_head, head);
+ }
+
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
+ /* If Monitor mode, check if we are oversubscribed
+ * against the full line rate.
+ */
+ if (mbpi && total > mbpi)
+ atomic_inc(&phba->cgn_driver_evt_cnt);
+ }
+ phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
+
+ /* Each minute save Fabric and Driver congestion information */
+ lpfc_cgn_save_evt_cnt(phba);
+
+ /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
+ * minute, adjust our next timer interval, if needed, to ensure a
+ * 1 minute granularity when we get the next timer interrupt.
+ */
+ if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
+ phba->cgn_evt_timestamp)) {
+ timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
+ jiffies);
+ if (timer_interval <= 0)
+ timer_interval = LPFC_CMF_INTERVAL;
+
+ /* If we adjust timer_interval, max_bytes_per_interval
+ * needs to be adjusted as well.
+ */
+ phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+ timer_interval, 1000);
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
+ phba->cmf_max_bytes_per_interval =
+ phba->cmf_link_byte_count;
+ }
+
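+ /* Worked example of the adjustment above: if the next minute
+ * boundary is 35 ms away, timer_interval becomes 35 and the link
+ * byte budget is rescaled to max_line_rate * 35 / 1000, keeping the
+ * Monitor-mode budget proportional to the shortened interval.
+ */
+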
+ /* Since total_bytes has already been zeroed, it's okay to unblock
+ * after max_bytes_per_interval is setup.
+ */
+ if (atomic_xchg(&phba->cmf_bw_wait, 0))
+ queue_work(phba->wq, &phba->unblock_request_work);
+
+ /* SCSI IO is now unblocked */
+ atomic_set(&phba->cmf_stop_io, 0);
+
+skip:
+ hrtimer_forward_now(timer,
+ ktime_set(0, timer_interval * NSEC_PER_MSEC));
+ return HRTIMER_RESTART;
+}
+
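+/* Illustrative consumer sketch for the rxtable ring maintained in
+ * lpfc_cmf_timer() above (head is the next write slot, tail the oldest
+ * entry; the producer advances tail when it catches it). Kept under
+ * #if 0 as a reference only - it ignores the LPFC_RXMONITOR_TABLE_IN_USE
+ * sentinel the producer briefly swaps into the head index.
+ */
+#if 0
+static void
+lpfc_rxtable_drain_ref(struct lpfc_hba *phba)
+{
+ struct rxtable_entry *entry;
+ int head, tail;
+
+ tail = atomic_read(&phba->rxtable_idx_tail);
+ head = atomic_read(&phba->rxtable_idx_head);
+ while (tail != head) {
+ entry = &phba->rxtable[tail];
+ /* ... consume *entry here ... */
+ tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+ }
+ atomic_set(&phba->rxtable_idx_tail, tail);
+}
+#endif
+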
#define trunk_link_status(__idx)\
bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
@@ -5329,6 +6105,9 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
trunk_link_status(0), trunk_link_status(1),
trunk_link_status(2), trunk_link_status(3));
+ if (phba->cmf_active_mode != LPFC_CFG_OFF)
+ lpfc_cmf_signal_init(phba);
+
if (port_fault)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3202 trunk error:0x%x (%s) seen on port0:%s "
@@ -5510,9 +6289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
uint8_t operational = 0;
struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
+ struct lpfc_acqe_cgn_signal *cgn_signal;
struct Scsi_Host *shost;
struct lpfc_vport **vports;
- int rc, i;
+ int rc, i, cnt;
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
@@ -5668,6 +6448,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
+ /* Call FW to obtain active parms */
+ lpfc_sli4_cgn_parm_chg_evt(phba);
+ break;
case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
/* Misconfigured WWN. Reports that the SLI Port is configured
* to use FA-WWN, but the attached device doesn’t support it.
@@ -5685,6 +6469,40 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1: x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ break;
+ cgn_signal = (struct lpfc_acqe_cgn_signal *)
+ &acqe_sli->event_data1;
+ phba->cgn_acqe_cnt++;
+
+ cnt = bf_get(lpfc_warn_acqe, cgn_signal);
+ atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
+ atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
+
+ /* no threshold for CMF, even 1 signal will trigger an event */
+
+ /* Alarm overrides warning, so check that first */
+ if (cgn_signal->alarm_cnt) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* Keep track of alarm cnt for cgn_info */
+ atomic_add(cgn_signal->alarm_cnt,
+ &phba->cgn_fabric_alarm_cnt);
+ /* Keep track of alarm cnt for CMF_SYNC_WQE */
+ atomic_add(cgn_signal->alarm_cnt,
+ &phba->cgn_sync_alarm_cnt);
+ }
+ } else if (cnt) {
+ /* signal action needs to be taken */
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* Keep track of warning cnt for cgn_info */
+ atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
+ /* Keep track of warning cnt for CMF_SYNC_WQE */
+ atomic_add(cnt, &phba->cgn_sync_warn_cnt);
+ }
+ }
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -6060,6 +6878,276 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
+ * is an asynchronous notification of a request to reset CM stats.
+ **/
+static void
+lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
+{
+ if (!phba->cgn_i)
+ return;
+ lpfc_init_congestion_stat(phba);
+}
+
+/**
+ * lpfc_cgn_params_val - Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cfg_param: pointer to FW provided congestion parameters.
+ *
+ * This routine validates the congestion parameters passed
+ * by the FW to the driver via an ACQE event.
+ **/
+static void
+lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
+{
+ spin_lock_irq(&phba->hbalock);
+
+ if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
+ LPFC_CFG_MONITOR)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+ "6225 CMF mode param out of range: %d\n",
+ p_cfg_param->cgn_param_mode);
+ p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_cgn_params_parse - Process a FW cong parm change event
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cgn_param: pointer to a data buffer with the FW cong params.
+ * @len: the size of @p_cgn_param in bytes.
+ *
+ * This routine validates the congestion management buffer signature
+ * from the FW, validates the contents and makes corrections for
+ * valid, in-range values. If the signature magic is correct and
+ * after parameter validation, the contents are copied to the driver's
+ * @phba structure. If the magic is incorrect, an error message is
+ * logged.
+ **/
+static void
+lpfc_cgn_params_parse(struct lpfc_hba *phba,
+ struct lpfc_cgn_param *p_cgn_param, uint32_t len)
+{
+ struct lpfc_cgn_info *cp;
+ uint32_t crc, oldmode;
+
+ /* Make sure the FW has encoded the correct magic number to
+ * validate the congestion parameter in FW memory.
+ */
+ if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "4668 FW cgn parm buffer data: "
+ "magic 0x%x version %d mode %d "
+ "level0 %d level1 %d "
+ "level2 %d byte13 %d "
+ "byte14 %d byte15 %d "
+ "byte11 %d byte12 %d activeMode %d\n",
+ p_cgn_param->cgn_param_magic,
+ p_cgn_param->cgn_param_version,
+ p_cgn_param->cgn_param_mode,
+ p_cgn_param->cgn_param_level0,
+ p_cgn_param->cgn_param_level1,
+ p_cgn_param->cgn_param_level2,
+ p_cgn_param->byte13,
+ p_cgn_param->byte14,
+ p_cgn_param->byte15,
+ p_cgn_param->byte11,
+ p_cgn_param->byte12,
+ phba->cmf_active_mode);
+
+ oldmode = phba->cmf_active_mode;
+
+ /* Any parameters out of range are corrected to defaults
+ * by this routine. No need to fail.
+ */
+ lpfc_cgn_params_val(phba, p_cgn_param);
+
+ /* Parameters are verified, move them into driver storage */
+ spin_lock_irq(&phba->hbalock);
+ memcpy(&phba->cgn_p, p_cgn_param,
+ sizeof(struct lpfc_cgn_param));
+
+ /* Update parameters in congestion info buffer now */
+ if (phba->cgn_i) {
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
+
+ switch (oldmode) {
+ case LPFC_CFG_OFF:
+ if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
+ /* Turning CMF on */
+ lpfc_cmf_start(phba);
+
+ if (phba->link_state >= LPFC_LINK_UP) {
+ phba->cgn_reg_fpin =
+ phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal =
+ phba->cgn_init_reg_signal;
+ lpfc_issue_els_edc(phba->pport, 0);
+ }
+ }
+ break;
+ case LPFC_CFG_MANAGED:
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ /* Turning CMF off */
+ lpfc_cmf_stop(phba);
+ if (phba->link_state >= LPFC_LINK_UP)
+ lpfc_issue_els_edc(phba->pport, 0);
+ break;
+ case LPFC_CFG_MONITOR:
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4661 Switch from MANAGED to "
+ "`MONITOR mode\n");
+ phba->cmf_max_bytes_per_interval =
+ phba->cmf_link_byte_count;
+
+ /* Resume blocked IO - unblock on workqueue */
+ queue_work(phba->wq,
+ &phba->unblock_request_work);
+ break;
+ }
+ break;
+ case LPFC_CFG_MONITOR:
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ /* Turning CMF off */
+ lpfc_cmf_stop(phba);
+ if (phba->link_state >= LPFC_LINK_UP)
+ lpfc_issue_els_edc(phba->pport, 0);
+ break;
+ case LPFC_CFG_MANAGED:
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4662 Switch from MONITOR to "
+ "MANAGED mode\n");
+ lpfc_cmf_signal_init(phba);
+ break;
+ }
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4669 FW cgn parm buf wrong magic 0x%x "
+ "version %d\n", p_cgn_param->cgn_param_magic,
+ p_cgn_param->cgn_param_version);
+ }
+}
+
+/**
+ * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a read_object mailbox command to
+ * get the congestion management parameters from the FW
+ * parses it and updates the driver maintained values.
+ *
+ * Returns
+ * 0 if the object was empty
+ * negative errno if an error was encountered
+ * count of bytes read from the object otherwise
+ **/
+int
+lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
+{
+ int ret = 0;
+ struct lpfc_cgn_param *p_cgn_param = NULL;
+ u32 *pdata = NULL;
+ u32 len = 0;
+
+ /* Find out if the FW has a new set of congestion parameters. */
+ len = sizeof(struct lpfc_cgn_param);
+ pdata = kzalloc(len, GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
+ pdata, len);
+
+ /* 0 means no data. A negative means error. A positive means
+ * bytes were copied.
+ */
+ if (!ret) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4670 CGN RD OBJ returns no data\n");
+ goto rd_obj_err;
+ } else if (ret < 0) {
+ /* Some error. Just exit and return it to the caller.*/
+ goto rd_obj_err;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "6234 READ CGN PARAMS Successful %d\n", len);
+
+ /* Parse data pointer over len and update the phba congestion
+ * parameters with values passed back. The receive rate values
+ * may have been altered in FW, but take no action here.
+ */
+ p_cgn_param = (struct lpfc_cgn_param *)pdata;
+ lpfc_cgn_params_parse(phba, p_cgn_param, len);
+
+ rd_obj_err:
+ kfree(pdata);
+ return ret;
+}
+
+/**
+ * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The FW generated Async ACQE SLI event calls this routine when
+ * the event type is an SLI Internal Port Event and the Event Code
+ * indicates a change to the FW maintained congestion parameters.
+ *
+ * This routine executes a Read_Object mailbox call to obtain the
+ * current congestion parameters maintained in FW and corrects
+ * the driver's active congestion parameters.
+ *
+ * The acqe event is not passed because there is no further data
+ * required.
+ *
+ * Returns nonzero error if event processing encountered an error.
+ * Zero otherwise for success.
+ **/
+static int
+lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
+{
+ int ret = 0;
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4664 Cgn Evt when E2E off. Drop event\n");
+ return -EACCES;
+ }
+
+ /* If the event is claiming an empty object, it's ok. A write
+ * could have cleared it. Only error is a negative return
+ * status.
+ */
+ ret = lpfc_sli4_cgn_params_read(phba);
+ if (ret < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4667 Error reading Cgn Params (%d)\n",
+ ret);
+ } else if (!ret) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4673 CGN Event empty object.\n");
+ }
+ return ret;
+}
+
+/**
* lpfc_sli4_async_event_proc - Process all the pending asynchronous event
* @phba: pointer to lpfc hba data structure.
*
@@ -6107,6 +7195,9 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
case LPFC_TRAILER_CODE_SLI:
lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
break;
+ case LPFC_TRAILER_CODE_CMSTAT:
+ lpfc_sli4_async_cmstat_evt(phba);
+ break;
default:
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
@@ -6391,6 +7482,15 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
return rc;
}
+static void
+lpfc_unblock_requests_work(struct work_struct *work)
+{
+ struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
+ unblock_request_work);
+
+ lpfc_unblock_requests(phba);
+}
+
/**
* lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
* @phba: pointer to lpfc hba data structure.
@@ -6466,7 +7566,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
lpfc_idle_stat_delay_work);
-
+ INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
return 0;
}
@@ -6697,6 +7797,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* FCF rediscover timer */
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
+ /* CMF congestion timer */
+ hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ phba->cmf_timer.function = lpfc_cmf_timer;
+
/*
* Control structure for handling external multi-buffer mailbox
* command pass-through.
@@ -7145,6 +8249,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
#endif
+ phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
+ if (!phba->cmf_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3331 Failed allocating per cpu cgn stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_hdwq_info;
+ }
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -7164,6 +8276,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
+out_free_hba_hdwq_info:
+ free_percpu(phba->sli4_hba.c_stat);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_idle_stat:
kfree(phba->sli4_hba.idle_stat);
@@ -7211,6 +8325,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
free_percpu(phba->sli4_hba.c_stat);
#endif
+ free_percpu(phba->cmf_stat);
kfree(phba->sli4_hba.idle_stat);
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
@@ -8537,9 +9652,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
}
/* FW supports persistent topology - override module parameter value */
phba->hba_flag |= HBA_PERSISTENT_TOPO;
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G7_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
+
+ /* if ASIC_GEN_NUM >= 0xC */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
if (!tf) {
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
? FLAGS_TOPOLOGY_MODE_LOOP
@@ -8547,8 +9665,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
} else {
phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
}
- break;
- default: /* G5 */
+ } else { /* G5 */
if (tf) {
/* If topology failover set - pt is '0' or '1' */
phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
@@ -8558,7 +9675,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
? FLAGS_TOPOLOGY_MODE_PT_PT
: FLAGS_TOPOLOGY_MODE_LOOP);
}
- break;
}
if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8683,6 +9799,52 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+
+ /* Next decide on FPIN or Signal E2E CGN support
+ * For congestion alarms and warnings valid combination are:
+ * 1. FPIN alarms / FPIN warnings
+ * 2. Signal alarms / Signal warnings
+ * 3. FPIN alarms / Signal warnings
+ * 4. Signal alarms / FPIN warnings
+ *
+ * Initialize the adapter signal frequency to its default (100 ms).
+ */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+ if (lpfc_use_cgn_signal) {
+ if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
+ /* MUST support both alarm and warning
+ * because EDC does not support alarm alone.
+ */
+ if (phba->cgn_reg_signal !=
+ EDC_CG_SIG_WARN_ONLY) {
+ /* Must support both or none */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+ phba->cgn_reg_signal =
+ EDC_CG_SIG_NOTSUPPORTED;
+ } else {
+ phba->cgn_reg_signal =
+ EDC_CG_SIG_WARN_ALARM;
+ phba->cgn_reg_fpin =
+ LPFC_CGN_FPIN_NONE;
+ }
+ }
+ }
+
+ /* Set the congestion initial signal and fpin values. */
+ phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
+ phba->cgn_init_reg_signal = phba->cgn_reg_signal;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
@@ -12063,6 +13225,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
+ hrtimer_cancel(&phba->cmf_timer);
+
if (phba->pport)
phba->sli4_hba.intr_enable = 0;
@@ -12133,6 +13297,240 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
+static uint32_t
+lpfc_cgn_crc32(uint32_t crc, u8 byte)
+{
+ uint32_t msb = 0;
+ uint32_t bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ msb = (crc >> 31) & 1;
+ crc <<= 1;
+
+ if (msb ^ (byte & 1)) {
+ crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
+ crc |= 1;
+ }
+ byte >>= 1;
+ }
+ return crc;
+}
+
+static uint32_t
+lpfc_cgn_reverse_bits(uint32_t wd)
+{
+ uint32_t result = 0;
+ uint32_t i;
+
+ for (i = 0; i < 32; i++) {
+ result <<= 1;
+ result |= (1 & (wd >> i));
+ }
+ return result;
+}
+
+/*
+ * This routine matches the algorithm the HBA firmware
+ * uses to validate data integrity.
+ */
+uint32_t
+lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
+{
+ uint32_t i;
+ uint32_t result;
+ uint8_t *data = (uint8_t *)ptr;
+
+ for (i = 0; i < byteLen; ++i)
+ crc = lpfc_cgn_crc32(crc, data[i]);
+
+ result = ~lpfc_cgn_reverse_bits(crc);
+ return result;
+}
+
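+/* Hedged sketch of how a reader could validate a congestion buffer the
+ * same way the firmware does: recompute over the info block with the
+ * driver's seed and compare against the stored little-endian CRC. This
+ * mirrors the stores done throughout this file; kept under #if 0 as an
+ * illustration, not an added driver path.
+ */
+#if 0
+static bool
+lpfc_cgn_info_crc_ok(struct lpfc_cgn_info *cp)
+{
+ uint32_t crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+
+ return cp->cgn_info_crc == cpu_to_le32(crc);
+}
+#endif
+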
+void
+lpfc_init_congestion_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct timespec64 cmpl_time;
+ struct tm broken;
+ uint16_t size;
+ uint32_t crc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
+
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+ phba->cgn_evt_minute = 0;
+ phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
+
+ memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+ cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
+ cp->cgn_info_version = LPFC_CGN_INFO_V3;
+
+ /* cgn parameters */
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+ ktime_get_real_ts64(&cmpl_time);
+ time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+ cp->cgn_info_month = broken.tm_mon + 1;
+ cp->cgn_info_day = broken.tm_mday;
+ cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
+ cp->cgn_info_hour = broken.tm_hour;
+ cp->cgn_info_minute = broken.tm_min;
+ cp->cgn_info_second = broken.tm_sec;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "2643 CGNInfo Init: Start Time "
+ "%d/%d/%d %d:%d:%d\n",
+ cp->cgn_info_day, cp->cgn_info_month,
+ cp->cgn_info_year, cp->cgn_info_hour,
+ cp->cgn_info_minute, cp->cgn_info_second);
+
+ /* Fill in default LUN qdepth */
+ if (phba->pport) {
+ size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+ cp->cgn_lunq = cpu_to_le16(size);
+ }
+
+ /* last used Index initialized to 0xff already */
+
+ cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
+ cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+
+ phba->cgn_evt_timestamp = jiffies +
+ msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+}
+
+void
+lpfc_init_congestion_stat(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct timespec64 cmpl_time;
+ struct tm broken;
+ uint32_t crc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6236 INIT Congestion Stat %p\n", phba->cgn_i);
+
+ if (!phba->cgn_i)
+ return;
+
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+
+ ktime_get_real_ts64(&cmpl_time);
+ time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+ cp->cgn_stat_month = broken.tm_mon + 1;
+ cp->cgn_stat_day = broken.tm_mday;
+ cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
+ cp->cgn_stat_hour = broken.tm_hour;
+ cp->cgn_stat_minute = broken.tm_min;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "2647 CGNstat Init: Start Time "
+ "%d/%d/%d %d:%d\n",
+ cp->cgn_stat_day, cp->cgn_stat_month,
+ cp->cgn_stat_year, cp->cgn_stat_hour,
+ cp->cgn_stat_minute);
+
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+}
+
+/**
+ * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
+ * @phba: Pointer to hba context object.
+ * @reg: flag to determine register or unregister.
+ */
+static int
+__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
+{
+ struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ int length, rc;
+
+ if (!phba->cgn_i)
+ return -ENXIO;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2641 REG_CONGESTION_BUF mbox allocation fail: "
+ "HBA state x%x reg %d\n",
+ phba->pport->port_state, reg);
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
+ LPFC_SLI4_MBX_EMBED);
+ reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
+ bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
+ if (reg > 0)
+ bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
+ else
+ bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
+ reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
+ reg_congestion_buf->addr_lo =
+ putPaddrLow(phba->cgn_i->phys);
+ reg_congestion_buf->addr_hi =
+ putPaddrHigh(phba->cgn_i->phys);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2642 REG_CONGESTION_BUF mailbox "
+ "failed with status x%x add_status x%x,"
+ " mbx status x%x reg %d\n",
+ shdr_status, shdr_add_status, rc, reg);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+int
+lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
+{
+ lpfc_cmf_stop(phba);
+ return __lpfc_reg_congestion_buf(phba, 0);
+}
+
+int
+lpfc_reg_congestion_buf(struct lpfc_hba *phba)
+{
+ return __lpfc_reg_congestion_buf(phba, 1);
+}
+
/**
* lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
* @phba: Pointer to HBA context object.
@@ -12241,7 +13639,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_fc4_type);
fcponly:
- phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvme_seg_cnt = 0;
@@ -12259,9 +13656,10 @@ fcponly:
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
- if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ /* Enable embedded Payload BDE if support is indicated */
+ if (bf_get(cfg_pbde, mbx_sli4_parameters))
+ phba->cfg_enable_pbde = 1;
+ else
phba->cfg_enable_pbde = 0;
/*
@@ -12299,7 +13697,7 @@ fcponly:
"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_pbde,
- phba->fcp_embed_io, phba->nvme_support,
+ phba->fcp_embed_io, sli4_params->nvme,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -12331,21 +13729,6 @@ fcponly:
else
phba->nsler = 0;
- /* Save PB info for use during HBA setup */
- sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
- sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
- sli4_params->mib_size = mbx_sli4_parameters->mib_size;
- sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
-
- /* Next we check for Vendor MIB support */
- if (sli4_params->mi_ver && phba->cfg_enable_mi)
- phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
- sli4_params->mi_ver, phba->cfg_enable_mi,
- sli4_params->mi_value, sli4_params->mib_bde_cnt,
- sli4_params->mib_size);
return 0;
}
@@ -12978,7 +14361,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
const struct firmware *fw)
{
int rc;
+ u8 sli_family;
+ sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
/* Three cases: (1) FW was not supported on the detected adapter.
* (2) FW update has been locked out administratively.
* (3) Some other error during FW update.
@@ -12986,10 +14371,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
* for admin diagnosis.
*/
if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
magic_number != MAGIC_NUMBER_G6) ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMBER_G7)) {
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
+ magic_number != MAGIC_NUMBER_G7) ||
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
+ magic_number != MAGIC_NUMBER_G7P)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3030 This firmware version is not supported on"
" this HBA model. Device:%x Magic:%x Type:%x "
@@ -13377,6 +14764,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
+ if (phba->cgn_i)
+ lpfc_unreg_congestion_buf(phba);
lpfc_free_sysfs_attr(vport);
@@ -14041,17 +15430,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
+ /* if ASIC_GEN_NUM >= 0xC */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
phba->ras_fwlog.ras_hwsupport = true;
if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
phba->cfg_ras_fwlog_buffsize)
phba->ras_fwlog.ras_enabled = true;
else
phba->ras_fwlog.ras_enabled = false;
- break;
- default:
+ } else {
phba->ras_fwlog.ras_hwsupport = false;
}
}
@@ -14164,8 +15554,9 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
unsigned int temp_idx;
int i;
int j = 0;
- unsigned long rem_nsec;
- struct lpfc_vport **vports;
+ unsigned long rem_nsec, iflags;
+ bool log_verbose = false;
+ struct lpfc_vport *port_iterator;
/* Don't dump messages if we explicitly set log_verbose for the
* physical port or any vport.
@@ -14173,16 +15564,24 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
if (phba->cfg_log_verbose)
return;
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- if (vports[i]->cfg_log_verbose) {
- lpfc_destroy_vport_work_array(phba, vports);
+ spin_lock_irqsave(&phba->port_list_lock, iflags);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
+ if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+ if (port_iterator->cfg_log_verbose)
+ log_verbose = true;
+
+ scsi_host_put(lpfc_shost_from_vport(port_iterator));
+
+ if (log_verbose) {
+ spin_unlock_irqrestore(&phba->port_list_lock,
+ iflags);
return;
}
}
}
- lpfc_destroy_vport_work_array(phba, vports);
+ spin_unlock_irqrestore(&phba->port_list_lock, iflags);
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 5660a8729462..7d480c798794 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,9 @@
#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
+#define LOG_RSVD1 0x01000000 /* Reserved */
+#define LOG_RSVD2 0x02000000 /* Reserved */
+#define LOG_CGN_MGMT 0x04000000 /* Congestion Mgmt events */
#define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */
#define LOG_ALL_MSG 0x7fffffff /* LOG all messages */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 84bc373190d8..6c754ee96bee 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ /* Topology handling for ASIC_GEN_NUM 0xC and later */
+ if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
!(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index be54fbf5146f..870e53b8f81d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -335,6 +335,19 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
phba->lpfc_cmd_rsp_buf_pool = NULL;
+ /* Free Congestion Data buffer */
+ if (phba->cgn_i) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ phba->cgn_i->virt, phba->cgn_i->phys);
+ kfree(phba->cgn_i);
+ phba->cgn_i = NULL;
+ }
+
+ /* Free RX table */
+ kfree(phba->rxtable);
+ phba->rxtable = NULL;
+
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e12f83fb795c..27263f02ab9f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -736,9 +736,13 @@ out:
* is already in MAPPED or UNMAPPED state. Catch this
* condition and don't set the nlp_state again because
* it causes an unnecessary transport unregister/register.
+ *
+ * Nodes marked for ADISC will move to MAPPED or UNMAPPED state
+ * after issuing ADISC.
*/
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
- if (ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_MAPPED_NODE);
}
@@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
out:
+ /* Unregister from backend, could have been skipped due to ADISC */
+ lpfc_nlp_unreg_node(vport, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
- memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
-
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
lpfc_unreg_rpi(vport, ndlp);
@@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
+ lpfc_disc_set_adisc(vport, ndlp);
+
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(&ndlp->lock);
- lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- spin_unlock_irq(&ndlp->lock);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path
+ * after receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path
+ * after receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index bcc804cefd30..73a3568ff17e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
- if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
- ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
+ if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
+ ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);
spin_unlock_irq(&ndlp->lock);
@@ -931,6 +931,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
+ uint32_t lat;
+ bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
@@ -1135,10 +1137,21 @@ out_err:
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
lpfc_ncmd->nvmeCmd = NULL;
- spin_unlock(&lpfc_ncmd->buf_lock);
+ call_done = true;
+ }
+ spin_unlock(&lpfc_ncmd->buf_lock);
+
+ /* Check if IO qualified for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ nCmd->io_dir == NVMEFC_FCP_READ &&
+ nCmd->payload_length) {
+ /* Used when calculating average latency */
+ lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
+ lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
+ }
+
+ if (call_done)
nCmd->done(nCmd);
- } else
- spin_unlock(&lpfc_ncmd->buf_lock);
/* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1212,6 +1225,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */
wqe->fcp_iread.rsrvd5 = 0;
+ /* For a CMF Managed port, iod must be zero'ed */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_IOD_NONE);
cstat->input_requests++;
}
} else {
@@ -1562,6 +1579,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
expedite = 1;
}
+ /* Check if IO qualifies for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
+ pnvme_fcreq->payload_length) {
+ ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
+ if (ret) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
+ /* Get start time for IO latency */
+ start = ktime_get_ns();
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
@@ -1576,7 +1606,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
- goto out_fail;
+ goto out_fail1;
}
}
@@ -1596,7 +1626,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"idx %d DID %x\n",
lpfc_queue_info->index, ndlp->nlp_DID);
ret = -EBUSY;
- goto out_fail;
+ goto out_fail1;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
@@ -1606,6 +1636,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ts_cmd_start = 0;
}
#endif
+ lpfc_ncmd->rx_cmd_start = start;
/*
* Store the data needed by the driver to issue, abort, and complete
@@ -1687,6 +1718,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
} else
cstat->control_requests--;
lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ out_fail1:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
+ pnvme_fcreq->payload_length, NULL);
out_fail:
return ret;
}
@@ -2324,7 +2358,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* race that leaves the WAIT flag set.
*/
spin_lock_irq(&ndlp->lock);
- ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
spin_unlock_irq(&ndlp->lock);
rport = remote_port->private;
@@ -2336,7 +2370,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&ndlp->lock);
ndlp->nrport = NULL;
- ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
spin_unlock_irq(&ndlp->lock);
rport->ndlp = NULL;
rport->remoteport = NULL;
@@ -2488,7 +2522,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* The transport will update it.
*/
spin_lock_irq(&vport->phba->hbalock);
- ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
spin_unlock_irq(&vport->phba->hbalock);
/* Don't let the host nvme transport keep sending keep-alives
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 69a5a844c69c..cc54ffb5c205 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -34,11 +34,8 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
-#define LPFC_MAX_NVME_INFO_TMP_LEN 100
-#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
-
-#define lpfc_ndlp_get_nrport(ndlp) \
- ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
+#define lpfc_ndlp_get_nrport(ndlp) \
+ ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
? NULL : ndlp->nrport)
struct lpfc_nvme_qhandle {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index f2d9a3580887..6e3dd0b9bcfa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
- spin_lock(&ctxp->ctxlock);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
if (ctxp->flag & LPFC_NVME_CTX_RLS &&
!(ctxp->flag & LPFC_NVME_ABORT_OP)) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
ctxp->flag &= ~LPFC_NVME_XBUSY;
- spin_unlock(&ctxp->ctxlock);
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
- iflag);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
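
The reordering above changes the nesting so that ctxp->ctxlock is taken first
and the abts buffer list lock is only held briefly, nested inside it, for the
list_del_init(). Keeping one consistent acquisition order is what makes the
two locks deadlock-free; a small pthread sketch of that discipline
(illustrative, not driver code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ctxlock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t listlock = PTHREAD_MUTEX_INITIALIZER;

    /* Every path takes ctxlock first, then listlock, never the reverse. */
    static void release_ctx(int *on_list)
    {
        pthread_mutex_lock(&ctxlock);
        if (*on_list) {
            pthread_mutex_lock(&listlock);  /* inner lock just for unlink */
            *on_list = 0;
            pthread_mutex_unlock(&listlock);
        }
        pthread_mutex_unlock(&ctxlock);
    }

    int main(void)
    {
        int on_list = 1;
        release_ctx(&on_list);
        printf("on_list=%d\n", on_list);   /* 0 */
        return 0;
    }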
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1b248c237be1..0fde1e874c7a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -96,30 +96,6 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
struct lpfc_vmid *vmid);
-static inline unsigned
-lpfc_cmd_blksize(struct scsi_cmnd *sc)
-{
- return sc->device->sector_size;
-}
-
-#define LPFC_CHECK_PROTECT_GUARD 1
-#define LPFC_CHECK_PROTECT_REF 2
-static inline unsigned
-lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
-{
- return 1;
-}
-
-static inline unsigned
-lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
-{
- if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
- return 0;
- if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
- return 1;
- return 0;
-}
-
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
* @phba: Pointer to HBA object.
@@ -683,7 +659,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
- tag = blk_mq_unique_tag(cmnd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
@@ -1046,13 +1022,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
sgpe = scsi_prot_sglist(sc);
- lba = t10_pi_ref_tag(sc->request);
+ lba = scsi_prot_ref_tag(sc);
if (lba == LPFC_INVALID_REFTAG)
return 0;
/* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
- blksize = lpfc_cmd_blksize(sc);
+ blksize = scsi_prot_interval(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
/* Make sure we have the right LBA if one is specified */
@@ -1441,7 +1417,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
uint8_t ret = 0;
- if (lpfc_cmd_guard_csum(sc)) {
+ if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
@@ -1521,7 +1497,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
uint8_t ret = 0;
- if (lpfc_cmd_guard_csum(sc)) {
+ if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
@@ -1629,7 +1605,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = scsi_prot_ref_tag(sc);
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -1668,12 +1644,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* protection data is automatically generated, not checked.
*/
if (datadir == DMA_FROM_DEVICE) {
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(pde6_ce, pde6, checking);
else
bf_set(pde6_ce, pde6, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(pde6_re, pde6, checking);
else
bf_set(pde6_re, pde6, 0);
@@ -1791,8 +1767,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command */
- blksize = lpfc_cmd_blksize(sc);
- reftag = t10_pi_ref_tag(sc->request);
+ blksize = scsi_prot_interval(sc);
+ reftag = scsi_prot_ref_tag(sc);
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -1832,12 +1808,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(pde6_ce, pde6, checking);
else
bf_set(pde6_ce, pde6, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(pde6_re, pde6, checking);
else
bf_set(pde6_re, pde6, 0);
@@ -2023,7 +1999,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = t10_pi_ref_tag(sc->request);
+ reftag = scsi_prot_ref_tag(sc);
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -2051,12 +2027,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* protection data is automatically generated, not checked.
*/
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2223,8 +2199,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command */
- blksize = lpfc_cmd_blksize(sc);
- reftag = t10_pi_ref_tag(sc->request);
+ blksize = scsi_prot_interval(sc);
+ reftag = scsi_prot_ref_tag(sc);
if (reftag == LPFC_INVALID_REFTAG)
goto out;
@@ -2281,9 +2257,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
diseed->ref_tag = cpu_to_le32(reftag);
diseed->ref_tag_tran = diseed->ref_tag;
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-
} else {
bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
/*
@@ -2300,7 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2557,7 +2532,7 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
* DIF (trailer) attached to it. Must adjust FCP data length
* to account for the protection data.
*/
- fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+ fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
return fcpdl;
}
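
The adjustment above accounts for one 8-byte DIF tuple per protection
interval: with scsi_prot_interval() returning the logical block size, a
transfer of N bytes grows by (N / interval) * 8 bytes on the wire. A quick
standalone check of that arithmetic (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Each protection interval carries an 8-byte DIF tuple on the wire. */
    static uint32_t bg_adjust_dl(uint32_t fcpdl, uint32_t prot_interval)
    {
        return fcpdl + (fcpdl / prot_interval) * 8;
    }

    int main(void)
    {
        /* 32 KiB in 512-byte intervals -> 64 tuples -> 512 extra bytes. */
        printf("%u\n", bg_adjust_dl(32768, 512));  /* 33280 */
        return 0;
    }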
@@ -2811,14 +2786,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* data length is a multiple of the blksize.
*/
sgde = scsi_sglist(cmd);
- blksize = lpfc_cmd_blksize(cmd);
+ blksize = scsi_prot_interval(cmd);
data_src = (uint8_t *)sg_virt(sgde);
data_len = sgde->length;
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- start_ref_tag = t10_pi_ref_tag(cmd->request);
+ start_ref_tag = scsi_prot_ref_tag(cmd);
if (start_ref_tag == LPFC_INVALID_REFTAG)
goto out;
start_app_tag = src->app_tag;
@@ -2839,7 +2814,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
/* First Guard Tag checking */
if (chk_guard) {
guard_tag = src->guard_tag;
- if (lpfc_cmd_guard_csum(cmd))
+ if (cmd->prot_flags
+ & SCSI_PROT_IP_CHECKSUM)
sum = lpfc_bg_csum(data_src,
blksize);
else
@@ -2910,7 +2886,7 @@ out:
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ scsi_prot_ref_tag(cmd),
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2920,7 +2896,7 @@ out:
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ scsi_prot_ref_tag(cmd),
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2930,7 +2906,7 @@ out:
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
- t10_pi_ref_tag(cmd->request),
+ scsi_prot_ref_tag(cmd),
app_tag, start_app_tag);
}
}
@@ -2992,7 +2968,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3007,7 +2983,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3022,7 +2998,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3066,9 +3042,9 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
" 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
(unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_logical_block_count(cmd), bgstat, bghm);
- /* Calcuate what type of error it was */
+ /* Calculate what type of error it was */
lpfc_calc_bg_err(phba, lpfc_cmd);
}
return ret;
@@ -3103,8 +3079,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9072 BLKGRD: Invalid BG Profile in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
ret = (-1);
goto out;
}
@@ -3115,8 +3091,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9073 BLKGRD: Invalid BG PDIF Block in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
ret = (-1);
goto out;
}
@@ -3131,8 +3107,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9055 BLKGRD: Guard Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3146,8 +3122,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9056 BLKGRD: Ref Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3161,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9061 BLKGRD: App Tag error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3205,10 +3181,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
"9057 BLKGRD: Unknown error in cmd "
"0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- t10_pi_ref_tag(cmd->request),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
- /* Calcuate what type of error it was */
+ /* Calculate what type of error it was */
lpfc_calc_bg_err(phba, lpfc_cmd);
}
out:
@@ -3854,6 +3830,143 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
+ * lpfc_unblock_requests - allow further commands to be queued.
+ * @phba: pointer to phba object
+ *
+ * For a single vport, just call scsi_unblock_requests on the physical port.
+ * For multiple vports, call scsi_unblock_requests for each vport.
+ */
+void
+lpfc_unblock_requests(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ int i;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !phba->sli4_hba.max_cfg_param.vpi_used) {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ return;
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_block_requests - prevent further commands from being queued.
+ * @phba: pointer to phba object
+ *
+ * For a single vport, just call scsi_block_requests on the physical port.
+ * For multiple vports, call scsi_block_requests for each vport.
+ */
+void
+lpfc_block_requests(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ int i;
+
+ if (atomic_read(&phba->cmf_stop_io))
+ return;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !phba->sli4_hba.max_cfg_param.vpi_used) {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ return;
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
+ * @phba: The HBA for which this call is being executed.
+ * @time: The latency of the IO that completed (in ns)
+ * @size: The size of the IO that completed
+ * @shost: SCSI host the IO completed on (NULL for a NVME IO)
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
+ * that means the IO was never issued to the HBA, so this routine is
+ * just being called to clean up the counter from a previous
+ * lpfc_update_cmf_cmd call.
+ */
+int
+lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
+ uint64_t time, uint32_t size, struct Scsi_Host *shost)
+{
+ struct lpfc_cgn_stat *cgs;
+
+ if (time != LPFC_CGN_NOT_SENT) {
+ /* lat is ns coming in, save latency in us */
+ if (time < 1000)
+ time = 1;
+ else
+ time = div_u64(time + 500, 1000); /* round it */
+
+ cgs = this_cpu_ptr(phba->cmf_stat);
+ atomic64_add(size, &cgs->rcv_bytes);
+ atomic64_add(time, &cgs->rx_latency);
+ atomic_inc(&cgs->rx_io_cnt);
+ }
+ return 0;
+}
+
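
The latency above arrives in nanoseconds and is accumulated in microseconds:
sub-microsecond completions are clamped to 1 so they still count, everything
else is rounded to the nearest microsecond by adding 500 before the divide. A
standalone model of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the ns -> us conversion: clamp tiny values, round the rest. */
    static uint64_t lat_ns_to_us(uint64_t ns)
    {
        if (ns < 1000)
            return 1;               /* still counts as one microsecond */
        return (ns + 500) / 1000;   /* round to nearest microsecond */
    }

    int main(void)
    {
        printf("%llu %llu %llu\n",
               (unsigned long long)lat_ns_to_us(250),    /* 1 */
               (unsigned long long)lat_ns_to_us(1499),   /* 1 */
               (unsigned long long)lat_ns_to_us(1500));  /* 2 */
        return 0;
    }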
+/**
+ * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
+ * @phba: The HBA for which this call is being executed.
+ * @size: The size of the IO that will be issued
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E.
+ */
+int
+lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
+{
+ uint64_t total;
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED) {
+ total = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_read(&cgs->total_bytes);
+ }
+ if (total >= phba->cmf_max_bytes_per_interval) {
+ if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
+ lpfc_block_requests(phba);
+ phba->cmf_last_ts =
+ lpfc_calc_cmf_latency(phba);
+ }
+ atomic_inc(&phba->cmf_busy);
+ return -EBUSY;
+ }
+ if (size > atomic_read(&phba->rx_max_read_cnt))
+ atomic_set(&phba->rx_max_read_cnt, size);
+ }
+
+ cgs = this_cpu_ptr(phba->cmf_stat);
+ atomic64_add(size, &cgs->total_bytes);
+ return 0;
+}
+
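
In Managed mode the submission path sums the per-CPU total_bytes counters and
rejects new read IO with -EBUSY once the interval budget
(cmf_max_bytes_per_interval) is reached; only an admitted IO is charged to the
submitting CPU's counter. A user-space model of that admission check (a plain
array stands in for the per-CPU counters; names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static uint64_t total_bytes[NR_CPUS];          /* per-CPU counters */
    static uint64_t max_bytes_per_interval = 4096; /* interval budget  */

    /* Return 0 if the IO is admitted, -1 (EBUSY) if over budget. */
    static int cmf_admit(int cpu, uint32_t size)
    {
        uint64_t total = 0;

        for (int i = 0; i < NR_CPUS; i++)
            total += total_bytes[i];
        if (total >= max_bytes_per_interval)
            return -1;

        total_bytes[cpu] += size;   /* charge the submitting CPU */
        return 0;
    }

    int main(void)
    {
        int admitted = 0;

        while (cmf_admit(0, 1024) == 0)
            admitted++;
        printf("admitted %d IOs before backpressure\n", admitted); /* 4 */
        return 0;
    }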
+/**
* lpfc_handle_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
@@ -4063,6 +4176,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
u32 logit = LOG_FCP;
u32 status, idx;
unsigned long iflags = 0;
+ u32 lat;
u8 wait_xb_clr = 0;
/* Sanity check on return of outstanding command */
@@ -4351,10 +4465,21 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
+ if (likely(!wait_xb_clr))
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ /* Check if IO qualified for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ cmd->sc_data_direction == DMA_FROM_DEVICE &&
+ (scsi_sg_count(cmd))) {
+ /* Used when calculating average latency */
+ lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
+ lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
+ }
+
if (wait_xb_clr)
goto out;
- lpfc_cmd->pCmd = NULL;
- spin_unlock(&lpfc_cmd->buf_lock);
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@@ -4367,8 +4492,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
-out:
spin_unlock(&lpfc_cmd->buf_lock);
+out:
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@@ -4775,6 +4900,11 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
fcp_cmnd->fcpCntl3 = READ_DATA;
if (hdwq)
hdwq->scsi_cstat.input_requests++;
+
+ /* For a CMF Managed port, iod must be zero'ed */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_IOD_NONE);
}
} else {
/* From the icmnd template, initialize words 4 - 11 */
@@ -5029,12 +5159,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
}
/* Check for valid Emulex Device ID */
- switch (ptr->device) {
- case PCI_DEVICE_ID_LANCER_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
- break;
- default:
+ if (phba->sli_rev != LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8347 Incapable PCI reset device: "
"0x%04x\n", ptr->device);
@@ -5423,13 +5549,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
*/
static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
{
- char *uuid = NULL;
+ struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
- if (cmd->request) {
- if (cmd->request->bio)
- uuid = blkcg_get_fc_appid(cmd->request->bio);
- }
- return uuid;
+ return bio ? blkcg_get_fc_appid(bio) : NULL;
}
/**
@@ -5462,7 +5584,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (phba->ktime_on)
start = ktime_get_ns();
#endif
-
+ start = ktime_get_ns();
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
/* sanity check on references */
@@ -5493,7 +5615,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
* transport is still transitioning.
*/
if (!ndlp)
- goto out_tgt_busy;
+ goto out_tgt_busy1;
+
+ /* Check if IO qualifies for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ cmnd->sc_data_direction == DMA_FROM_DEVICE &&
+ (scsi_sg_count(cmnd))) {
+ /* Latency start time saved in rx_cmd_start later in routine */
+ err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
+ if (err)
+ goto out_tgt_busy1;
+ }
+
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
@@ -5521,7 +5654,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
ndlp->nlp_portname.u.wwn[5],
ndlp->nlp_portname.u.wwn[6],
ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ goto out_tgt_busy2;
}
}
@@ -5534,6 +5667,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"IO busied\n");
goto out_host_busy;
}
+ lpfc_cmd->rx_cmd_start = start;
/*
* Store the midlayer's command structure for the completion phase
@@ -5557,8 +5691,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"reftag x%x cnt %u pt %x\n",
dif_op_str[scsi_get_prot_op(cmnd)],
cmnd->cmnd[0],
- t10_pi_ref_tag(cmnd->request),
- blk_rq_sectors(cmnd->request),
+ scsi_prot_ref_tag(cmnd),
+ scsi_logical_block_count(cmnd),
(cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5569,8 +5703,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"9038 BLKGRD: rcvd PROT_NORMAL cmd: "
"x%x reftag x%x cnt %u pt %x\n",
cmnd->cmnd[0],
- t10_pi_ref_tag(cmnd->request),
- blk_rq_sectors(cmnd->request),
+ scsi_prot_ref_tag(cmnd),
+ scsi_logical_block_count(cmnd),
(cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5641,8 +5775,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
bf_get(wqe_tmo,
&lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)
- (cmnd->request->timeout / 1000));
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
@@ -5678,13 +5811,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
out_host_busy_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
return SCSI_MLQUEUE_HOST_BUSY;
- out_tgt_busy:
+ out_tgt_busy2:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
+ out_tgt_busy1:
return SCSI_MLQUEUE_TARGET_BUSY;
out_fail_command_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
out_fail_command:
cmnd->scsi_done(cmnd);
@@ -6273,6 +6413,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_event_header scsi_event;
int status;
u32 logit = LOG_FCP;
+ u32 dev_loss_tmo = vport->cfg_devloss_tmo;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -6314,39 +6455,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
- if (status != SUCCESS)
- logit = LOG_TRACE_EVENT;
- spin_lock_irqsave(&pnode->lock, flags);
- if (status != SUCCESS &&
- (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
- !pnode->logo_waitq) {
- pnode->logo_waitq = &waitq;
- pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- pnode->nlp_flag |= NLP_ISSUE_LOGO;
- pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
- spin_unlock_irqrestore(&pnode->lock, flags);
- lpfc_unreg_rpi(vport, pnode);
- wait_event_timeout(waitq,
- (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
- msecs_to_jiffies(vport->cfg_devloss_tmo *
- 1000));
-
- if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0725 SCSI layer TGTRST failed & LOGO TMO "
- " (%d, %llu) return x%x\n", tgt_id,
- lun_id, status);
- spin_lock_irqsave(&pnode->lock, flags);
- pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ if (status != SUCCESS) {
+ logit = LOG_TRACE_EVENT;
+
+ /* Issue LOGO, if no LOGO is outstanding */
+ spin_lock_irqsave(&pnode->lock, flags);
+ if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) &&
+ !pnode->logo_waitq) {
+ pnode->logo_waitq = &waitq;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ pnode->nlp_flag |= NLP_ISSUE_LOGO;
+ pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ lpfc_unreg_rpi(vport, pnode);
+ wait_event_timeout(waitq,
+ (!(pnode->upcall_flags &
+ NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(dev_loss_tmo *
+ 1000));
+
+ if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
+ "0725 SCSI layer TGTRST "
+ "failed & LOGO TMO (%d, %llu) "
+ "return x%x\n",
+ tgt_id, lun_id, status);
+ spin_lock_irqsave(&pnode->lock, flags);
+ pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ } else {
+ spin_lock_irqsave(&pnode->lock, flags);
+ }
+ pnode->logo_waitq = NULL;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = SUCCESS;
+
} else {
- spin_lock_irqsave(&pnode->lock, flags);
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = FAILED;
}
- pnode->logo_waitq = NULL;
- spin_unlock_irqrestore(&pnode->lock, flags);
- status = SUCCESS;
- } else {
- status = FAILED;
- spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_printf_vlog(vport, KERN_ERR, logit,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index f76667b7da7b..3836d7f6a575 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt {
#define FC_PORTSPEED_128GBIT 0x2000
#endif
+#ifndef FC_PORTSPEED_256GBIT
+#define FC_PORTSPEED_256GBIT 0x4000
+#endif
+
#define TXRDY_PAYLOAD_LEN 12
/* For sysfs/debugfs tmp string max len */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f530d8fe7a8c..ffd8a140638c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1439,7 +1439,7 @@ out:
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+ iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -1769,6 +1769,254 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @cmf_cmpl: Pointer to completed WCQE.
+ *
+ * This routine will inform the driver of any BW adjustments we need
+ * to make. These changes will be picked up during the next CMF
+ * timer interrupt. In addition, any BW changes will be logged
+ * with LOG_CGN_MGMT.
+ **/
+static void
+lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_wcqe_complete *cmf_cmpl)
+{
+ union lpfc_wqe128 *wqe;
+ uint32_t status, info;
+ uint64_t bw, bwdif, slop;
+ uint64_t pcent, bwpcent;
+ int asig, afpin, sigcnt, fpincnt;
+ int wsigmax, wfpinmax, cg, tdp;
+ char *s;
+
+ /* First check for error */
+ status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
+ if (status) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6211 CMF_SYNC_WQE Error "
+ "req_tag x%x status x%x hwstatus x%x "
+ "tdatap x%x parm x%x\n",
+ bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
+ bf_get(lpfc_wcqe_c_status, cmf_cmpl),
+ bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
+ cmf_cmpl->total_data_placed,
+ cmf_cmpl->parameter);
+ goto out;
+ }
+
+ /* Gather congestion information on a successful cmpl */
+ info = cmf_cmpl->parameter;
+ phba->cmf_active_info = info;
+
+ /* See if firmware info count is valid or has changed */
+ if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
+ info = 0;
+ else
+ phba->cmf_info_per_interval = info;
+
+ tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
+ cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
+
+ /* Get BW requirement from firmware */
+ bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
+ if (!bw) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6212 CMF_SYNC_WQE x%x: NULL bw\n",
+ bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
+ goto out;
+ }
+
+ /* Gather information needed for logging if a BW change is required */
+ wqe = &cmdiocb->wqe;
+ asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
+ afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
+ fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
+ sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
+ if (phba->cmf_max_bytes_per_interval != bw ||
+ (asig || afpin || sigcnt || fpincnt)) {
+ /* Are we increasing or decreasing BW */
+ if (phba->cmf_max_bytes_per_interval < bw) {
+ bwdif = bw - phba->cmf_max_bytes_per_interval;
+ s = "Increase";
+ } else {
+ bwdif = phba->cmf_max_bytes_per_interval - bw;
+ s = "Decrease";
+ }
+
+ /* What is the change percentage */
+ slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
+ pcent = div64_u64(bwdif * 100 + slop,
+ phba->cmf_link_byte_count);
+ bwpcent = div64_u64(bw * 100 + slop,
+ phba->cmf_link_byte_count);
+ if (asig) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6237 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: Signal Alarm: cg:%d "
+ "Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ } else if (afpin) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6238 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: FPIN Alarm: cg:%d "
+ "Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ } else if (sigcnt) {
+ wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6239 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: Signal Warning: "
+ "Cnt %d Max %d: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, sigcnt,
+ wsigmax, cg, phba->cmf_active_info);
+ } else if (fpincnt) {
+ wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6240 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: FPIN Warning: "
+ "Cnt %d Max %d: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, fpincnt,
+ wfpinmax, cg, phba->cmf_active_info);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6241 BW Threshold %lld%% (%lld): "
+ "CMF %lld%% %s: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ }
+ } else if (info) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6246 Info Threshold %u\n", info);
+ }
+
+ /* Save BW change to be picked up during next timer interrupt */
+ phba->cmf_last_sync_bw = bw;
+out:
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
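
The slop term above is cmf_link_byte_count / 200, i.e. 1/200 of a percent once
divided back out, so a delta left sitting a hair under a percent boundary by
earlier integer truncation still lands on the boundary instead of being
floored down. A standalone illustration of the effect:

    #include <stdint.h>
    #include <stdio.h>

    /* Integer percentage with the slop term: values within 1/200 of a
     * percent below a boundary are nudged over it before flooring. */
    static uint64_t pct_with_slop(uint64_t part, uint64_t capacity)
    {
        uint64_t slop = capacity / 200;
        return (part * 100 + slop) / capacity;
    }

    int main(void)
    {
        uint64_t capacity = 1000000;

        /* 19.996%: floors to 19 without slop, 20 with it. */
        printf("%llu\n", (unsigned long long)((199960ULL * 100) / capacity));
        printf("%llu\n", (unsigned long long)pct_with_slop(199960, capacity));
        return 0;
    }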
+/**
+ * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
+ * @phba: Pointer to HBA context object.
+ * @ms: interval in milliseconds to set in the WQE; 0 means issue the init op
+ * @total: Total rcv bytes for this interval
+ *
+ * This routine is called on every CMF timer interrupt. Its purpose is
+ * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
+ * that may indicate we have congestion (FPINs or Signals). Upon
+ * completion, the firmware will indicate any BW restrictions the
+ * driver may need to apply.
+ **/
+int
+lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
+{
+ union lpfc_wqe128 *wqe;
+ struct lpfc_iocbq *sync_buf;
+ unsigned long iflags;
+ u32 ret_val;
+ u32 atot, wtot, max;
+
+ /* First address any alarm / warning activity */
+ atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
+ wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+
+ /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
+ if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
+ phba->link_state == LPFC_LINK_DOWN)
+ return 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ sync_buf = __lpfc_sli_get_iocbq(phba);
+ if (!sync_buf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+ "6213 No available WQEs for CMF_SYNC_WQE\n");
+ ret_val = -ENOMEM;
+ goto out_unlock;
+ }
+
+ wqe = &sync_buf->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to zero */
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* If this is the very first CMF_SYNC_WQE, issue an init operation */
+ if (!ms) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6441 CMF Init %d - CMF_SYNC_WQE\n",
+ phba->fc_eventTag);
+ bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
+ bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
+ goto initpath;
+ }
+
+ bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
+ bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
+
+ /* Check for alarms / warnings */
+ if (atot) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* We hit a Signal alarm condition */
+ bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
+ } else {
+ /* We hit an FPIN alarm condition */
+ bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
+ }
+ } else if (wtot) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* We hit a Signal warning condition */
+ max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
+ lpfc_acqe_cgn_frequency;
+ bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
+ bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ } else {
+ /* We hit an FPIN warning condition */
+ bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ }
+ }
+
+ /* Update total read blocks during previous timer interval */
+ wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
+
+initpath:
+ bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
+ wqe->cmf_sync.event_tag = phba->fc_eventTag;
+ bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
+
+ /* Setup reqtag to match the wqe completion. */
+ bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
+
+ bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+
+ bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
+ bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
+
+ sync_buf->vport = phba->pport;
+ sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
+ sync_buf->iocb_cmpl = NULL;
+ sync_buf->context1 = NULL;
+ sync_buf->context2 = NULL;
+ sync_buf->context3 = NULL;
+ sync_buf->sli4_xritag = NO_XRI;
+
+ sync_buf->iocb_flag |= LPFC_IO_CMF;
+ ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
+ if (ret_val)
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
+ ret_val);
+out_unlock:
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret_val;
+}
+
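
The WQE reports the interval's receive total in fixed-size blocks (total /
LPFC_CMF_BLK_SIZE), and the completion handler reverses the mapping with
bw = tdp * LPFC_CMF_BLK_SIZE. The constant's value is not visible in this
diff; assuming the conventional 512-byte block, the conversion truncates and
any remainder is simply dropped:

    #include <stdint.h>
    #include <stdio.h>

    #define CMF_BLK_SIZE 512  /* assumed value of LPFC_CMF_BLK_SIZE */

    int main(void)
    {
        uint64_t total = 1234567;   /* bytes received this interval */
        uint32_t read_bytes = (uint32_t)(total / CMF_BLK_SIZE);

        /* Truncating divide: the remainder is dropped, not carried. */
        printf("blocks=%u dropped=%llu\n", read_bytes,
               (unsigned long long)(total % CMF_BLK_SIZE));  /* 2411 135 */
        return 0;
    }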
+/**
* lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -4467,6 +4715,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
} else
phba->sli4_hba.intr_enable = 0;
+ phba->hba_flag &= ~HBA_SETUP;
return retval;
}
@@ -4787,6 +5036,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
+ phba->hba_flag &= ~HBA_SETUP;
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
@@ -5674,16 +5924,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
phba->sli4_hba.lnk_info.lnk_no =
bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
+ phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
+ "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
+ "flash_id: x%02x, asic_rev: x%02x\n",
phba->sli4_hba.lnk_info.lnk_tp,
phba->sli4_hba.lnk_info.lnk_no,
- phba->BIOSVersion);
+ phba->BIOSVersion, phba->sli4_hba.flash_id,
+ phba->sli4_hba.asic_rev);
out_free_mboxq:
if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -6413,6 +6667,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
uint32_t feature)
{
uint32_t len;
+ u32 sig_freq = 0;
len = sizeof(struct lpfc_mbx_set_feature) -
sizeof(struct lpfc_sli4_cfg_mhdr);
@@ -6435,6 +6690,35 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_CGN_SIGNAL:
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ sig_freq = 0;
+ else
+ sig_freq = phba->cgn_sig_freq;
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+ bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+ }
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+ bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+
+ if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+ phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
+ sig_freq = 0;
+ else
+ sig_freq = lpfc_acqe_cgn_frequency;
+
+ bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 12;
+ break;
case LPFC_SET_DUAL_DUMP:
bf_set(lpfc_mbx_set_feature_dd,
&mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
@@ -6443,8 +6727,22 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
mbox->u.mqe.un.set_feature.param_len = 4;
break;
+ case LPFC_SET_ENABLE_MI:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
+ phba->pport->cfg_lun_queue_depth);
+ bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
+ phba->sli4_hba.pc_sli4_params.mi_ver);
+ break;
+ case LPFC_SET_ENABLE_CMF:
+ bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ bf_set(lpfc_mbx_set_feature_cmf,
+ &mbox->u.mqe.un.set_feature, 1);
+ break;
}
-
return;
}
@@ -7365,7 +7663,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
mbox->u.mqe.un.set_host_data.param_len =
LPFC_HOST_OS_DRIVER_VERSION_SIZE;
- snprintf(mbox->u.mqe.un.set_host_data.data,
+ snprintf(mbox->u.mqe.un.set_host_data.un.data,
LPFC_HOST_OS_DRIVER_VERSION_SIZE,
"Linux %s v"LPFC_DRIVER_VERSION,
(phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
@@ -7433,6 +7731,91 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
return 1;
}
+static void
+lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+ u32 sig, acqe;
+
+ /* Two outcomes. (1) Set features was successful and EDC negotiation
+ * is done. (2) The mailbox failed, so fall back to FPIN support only.
+ */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "2516 CGN SET_FEATURE mbox failed with "
+ "status x%x add_status x%x, mbx status x%x "
+ "Reset Congestion to FPINs only\n",
+ shdr_status, shdr_add_status,
+ pmb->u.mb.mbxStatus);
+ /* If there is a mbox error, move on to RDF */
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ goto out;
+ }
+
+ /* Zero out Congestion Signal ACQE counter */
+ phba->cgn_acqe_cnt = 0;
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+ acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
+ &pmb->u.mqe.un.set_feature);
+ sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
+ &pmb->u.mqe.un.set_feature);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4620 SET_FEATURES Success: Freq: %ds %dms "
+ " Reg: x%x x%x\n", acqe, sig,
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Register for FPIN events from the fabric now that the
+ * EDC common_set_features has completed.
+ */
+ lpfc_issue_els_rdf(vport, 0);
+}
+
+int
+lpfc_config_cgn_signal(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ u32 rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ goto out_rdf;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
+ "Reg: x%x x%x\n",
+ phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
+ return 0;
+
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+out_rdf:
+ /* If there is a mbox error, move on to RDF */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ lpfc_issue_els_rdf(phba->pport, 0);
+ return -EIO;
+}
+
/**
* lpfc_init_idle_stat_hb - Initialize idle_stat tracking
* @phba: pointer to lpfc hba data structure.
@@ -7464,7 +7847,8 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
idle_stat->prev_wall = wall;
- if (phba->nvmet_support)
+ if (phba->nvmet_support ||
+ phba->cmf_active_mode != LPFC_CFG_OFF)
cq->poll_mode = LPFC_QUEUE_WORK;
else
cq->poll_mode = LPFC_IRQ_POLL;
@@ -7496,6 +7880,258 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
}
/**
+ * lpfc_cmf_setup - Initialize CMF and MI support
+ * @phba: Pointer to HBA context object.
+ *
+ * This is called from HBA setup during driver load or when the HBA
+ * comes online. This does all the initialization to support CMF and MI.
+ **/
+static int
+lpfc_cmf_setup(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mqe *mqe;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_pc_sli4_params *sli4_params;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ int length;
+ int rc, cmf, mi_ver;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ mqe = &mboxq->u.mqe;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+ }
+
+ /* Gather info on CMF and MI support */
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
+
+ /* Are we forcing MI off via module parameter? */
+ if (!phba->cfg_enable_mi)
+ sli4_params->mi_ver = 0;
+
+ /* Always try to enable MI feature if we can */
+ if (sli4_params->mi_ver) {
+ lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mi_ver = bf_get(lpfc_mbx_set_feature_mi,
+ &mboxq->u.mqe.un.set_feature);
+
+ if (rc == MBX_SUCCESS) {
+ if (mi_ver) {
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_CGN_MGMT,
+ "6215 MI is enabled\n");
+ sli4_params->mi_ver = mi_ver;
+ } else {
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_CGN_MGMT,
+ "6338 MI is disabled\n");
+ sli4_params->mi_ver = 0;
+ }
+ } else {
+ /* mi_ver is already set from GET_SLI4_PARAMETERS */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_CGN_MGMT | LOG_INIT,
+ "6245 Enable MI Mailbox x%x (x%x/x%x) "
+ "failed, rc:x%x mi:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get
+ (phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get
+ (phba, mboxq),
+ rc, sli4_params->mi_ver);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6217 MI is disabled\n");
+ }
+
+ /* Ensure FDMI is enabled for MI if enable_mi is set */
+ if (sli4_params->mi_ver)
+ phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
+
+ /* Always try to enable CMF feature if we can */
+ if (sli4_params->cmf) {
+ lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ cmf = bf_get(lpfc_mbx_set_feature_cmf,
+ &mboxq->u.mqe.un.set_feature);
+ if (rc == MBX_SUCCESS && cmf) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6218 CMF is enabled: mode %d\n",
+ phba->cmf_active_mode);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_CGN_MGMT | LOG_INIT,
+ "6219 Enable CMF Mailbox x%x (x%x/x%x) "
+ "failed, rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get
+ (phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get
+ (phba, mboxq),
+ rc, cmf);
+ sli4_params->cmf = 0;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ goto no_cmf;
+ }
+
+ /* Allocate Congestion Information Buffer */
+ if (!phba->cgn_i) {
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp)
+ mp->virt = dma_alloc_coherent
+ (&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ &mp->phys, GFP_KERNEL);
+ if (!mp || !mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2640 Failed to alloc memory "
+ "for Congestion Info\n");
+ kfree(mp);
+ sli4_params->cmf = 0;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ goto no_cmf;
+ }
+ phba->cgn_i = mp;
+
+ /* initialize congestion buffer info */
+ lpfc_init_congestion_buf(phba);
+ lpfc_init_congestion_stat(phba);
+ }
+
+ rc = lpfc_sli4_cgn_params_read(phba);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "6242 Error reading Cgn Params (%d)\n",
+ rc);
+ /* Ensure CGN Mode is off */
+ sli4_params->cmf = 0;
+ } else if (!rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "6243 CGN Event empty object.\n");
+ /* Ensure CGN Mode is off */
+ sli4_params->cmf = 0;
+ }
+ } else {
+no_cmf:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6220 CMF is disabled\n");
+ }
+
+ /* Only register congestion buffer with firmware if BOTH
+ * CMF and E2E are enabled.
+ */
+ if (sli4_params->cmf && sli4_params->mi_ver) {
+ rc = lpfc_reg_congestion_buf(phba);
+ if (rc) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ phba->cgn_i->virt, phba->cgn_i->phys);
+ kfree(phba->cgn_i);
+ phba->cgn_i = NULL;
+ /* Ensure CGN Mode is off */
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ return 0;
+ }
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6470 Setup MI version %d CMF %d mode %d\n",
+ sli4_params->mi_ver, sli4_params->cmf,
+ phba->cmf_active_mode);
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Initialize atomic counters */
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+ /* Allocate RX Monitor Buffer */
+ if (!phba->rxtable) {
+ phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+ sizeof(struct rxtable_entry),
+ GFP_KERNEL);
+ if (!phba->rxtable) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2644 Failed to alloc memory "
+ "for RX Monitor Buffer\n");
+ return -ENOMEM;
+ }
+ }
+ atomic_set(&phba->rxtable_idx_head, 0);
+ atomic_set(&phba->rxtable_idx_tail, 0);
+ return 0;
+}
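
The RX monitor table allocated above is consumed as a ring indexed by the rxtable_idx_head/rxtable_idx_tail atomics. A minimal producer-side sketch, assuming the wrap-around convention used here (the real producer lives elsewhere in the driver, and the entry field name is an assumption):

/* Hypothetical producer: claim the head slot, fill it, then advance
 * head modulo the ring size.
 */
static void rxtable_push(struct lpfc_hba *phba, u64 total_bytes)
{
	int head = atomic_read(&phba->rxtable_idx_head);
	struct rxtable_entry *entry = &phba->rxtable[head];

	entry->total_bytes = total_bytes;	/* assumed field name */
	atomic_set(&phba->rxtable_idx_head,
		   (head + 1) % LPFC_MAX_RXMONITOR_ENTRY);
}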
+
+static int
+lpfc_set_host_tm(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t len, rc;
+ struct timespec64 cur_time;
+ struct tm broken;
+ uint32_t month, day, year;
+ uint32_t hour, minute, second;
+ struct lpfc_mbx_set_host_date_time *tm;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
+ mboxq->u.mqe.un.set_host_data.param_len =
+ sizeof(struct lpfc_mbx_set_host_date_time);
+ tm = &mboxq->u.mqe.un.set_host_data.un.tm;
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+ month = broken.tm_mon + 1;
+ day = broken.tm_mday;
+ year = broken.tm_year - 100;
+ hour = broken.tm_hour;
+ minute = broken.tm_min;
+ second = broken.tm_sec;
+ bf_set(lpfc_mbx_set_host_month, tm, month);
+ bf_set(lpfc_mbx_set_host_day, tm, day);
+ bf_set(lpfc_mbx_set_host_year, tm, year);
+ bf_set(lpfc_mbx_set_host_hour, tm, hour);
+ bf_set(lpfc_mbx_set_host_min, tm, minute);
+ bf_set(lpfc_mbx_set_host_sec, tm, second);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
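
The packing convention above is easy to get wrong: months are delivered 1-12 while struct tm counts from 0, and the year field carries a two-digit offset from 2000 even though tm_year counts from 1900. A standalone userspace sketch of the same conversion:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm broken;

	gmtime_r(&now, &broken);
	/* tm_mon is 0-11, hence the +1; tm_year is years since 1900,
	 * so subtracting 100 yields the two-digit year since 2000.
	 */
	printf("%02d/%02d/%02d %02d:%02d:%02d\n",
	       broken.tm_mon + 1, broken.tm_mday, broken.tm_year - 100,
	       broken.tm_hour, broken.tm_min, broken.tm_sec);
	return 0;
}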
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -7584,6 +8220,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ rc = lpfc_set_host_tm(phba);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "6468 Set host date / time: Status x%x:\n", rc);
+
/*
* Continue initialization with default values even if driver failed
* to read FCoE param config regions, only read parameters if the
@@ -8111,6 +8751,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Indicate device interrupt mode */
phba->sli4_hba.intr_enable = 1;
+ /* Setup CMF after HBA is initialized */
+ lpfc_cmf_setup(phba);
+
if (!(phba->hba_flag & HBA_FCOE_MODE) &&
(phba->hba_flag & LINK_DISABLED)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -8132,7 +8775,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
+
+ phba->hba_flag |= HBA_SETUP;
return rc;
+
out_io_buff_free:
/* Free allocated IO Buffers */
lpfc_io_free(phba);
@@ -8790,8 +9436,11 @@ static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
int rc = 0;
unsigned long timeout = 0;
+ u32 sli_flag;
+ u8 cmd, subsys, opcode;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
@@ -8809,12 +9458,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
if (timeout)
lpfc_sli4_process_missed_mbox_completions(phba);
- /* Wait for the outstnading mailbox command to complete */
+ /* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
- /* Timeout, marked the outstanding cmd not complete */
+ /* Timeout, mark the outstanding cmd not complete */
+
+ /* Sanity check that sli.mbox_active has not completed or been
+ * cancelled from another context during the last 2ms sleep, so
+ * take the hbalock before logging.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ cmd = mboxq->u.mb.mbxCommand;
+ subsys = lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq);
+ sli_flag = psli->sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2352 Mailbox command x%x "
+ "(x%x/x%x) sli_flag x%x could "
+ "not complete\n",
+ cmd, subsys, opcode,
+ sli_flag);
+ } else {
+ spin_unlock_irq(&phba->hbalock);
+ }
+
rc = 1;
break;
}
@@ -9763,6 +10437,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
*pcmd == ELS_CMD_RDF ||
+ *pcmd == ELS_CMD_EDC ||
*pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
@@ -10097,8 +10772,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
@@ -11619,6 +12292,7 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
@@ -11627,11 +12301,16 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
- lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
+ /*
+ * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
+ * if exchange is busy.
+ */
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
+
+ lpfc_nlp_put(ndlp);
}
/**
@@ -14626,8 +15305,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
switch (cq->poll_mode) {
case LPFC_IRQ_POLL:
- irq_poll_sched(&cq->iop);
- break;
+ /* CGN mgmt is mutually exclusive from softirq processing */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+ irq_poll_sched(&cq->iop);
+ break;
+ }
+ fallthrough;
case LPFC_QUEUE_WORK:
default:
if (is_kdump_kernel())
@@ -20021,6 +20704,91 @@ out:
}
/**
+ * lpfc_log_fw_write_cmpl - logs firmware write completion status
+ * @phba: pointer to lpfc hba data structure
+ * @shdr_status: wr_object rsp's status field
+ * @shdr_add_status: wr_object rsp's add_status field
+ * @shdr_add_status_2: wr_object rsp's add_status_2 field
+ * @shdr_change_status: wr_object rsp's change_status field
+ * @shdr_csf: wr_object rsp's csf bit
+ *
+ * This routine is intended to be called after a firmware write completes.
+ * It will log the next action items to be performed by the user to
+ * instantiate the newly downloaded firmware, or the reason for incompatibility.
+ **/
+static void
+lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
+ u32 shdr_add_status, u32 shdr_add_status_2,
+ u32 shdr_change_status, u32 shdr_csf)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "4198 %s: flash_id x%02x, asic_rev x%02x, "
+ "status x%02x, add_status x%02x, add_status_2 x%02x, "
+ "change_status x%02x, csf %01x\n", __func__,
+ phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ shdr_change_status, shdr_csf);
+
+ if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
+ switch (shdr_add_status_2) {
+ case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4199 Firmware write failed: "
+ "image incompatible with flash x%02x\n",
+ phba->sli4_hba.flash_id);
+ break;
+ case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4200 Firmware write failed: "
+ "image incompatible with ASIC "
+ "architecture x%02x\n",
+ phba->sli4_hba.asic_rev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4210 Firmware write failed: "
+ "add_status_2 x%02x\n",
+ shdr_add_status_2);
+ break;
+ }
+ } else if (!shdr_status && !shdr_add_status) {
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
+ switch (shdr_change_status) {
+ case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3198 Firmware write complete: System "
+ "reboot required to instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_FW_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3199 Firmware write complete: "
+ "Firmware reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3200 Firmware write complete: Port "
+ "Migration or PCI Reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PCI_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3201 Firmware write complete: PCI "
+ "Reset required to instantiate\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
* lpfc_wr_object - write an object to the firmware
* @phba: HBA structure that indicates port to create a queue on.
* @dmabuf_list: list of dmabufs to write to the port.
@@ -20046,7 +20814,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
+ uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+ uint32_t shdr_change_status = 0, shdr_csf = 0;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -20100,58 +20869,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
&wr_object->header.cfg_shdr.response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&wr_object->header.cfg_shdr.response);
+ shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
+ &wr_object->header.cfg_shdr.response);
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
-
- if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
- shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
- shdr_csf = bf_get(lpfc_wr_object_csf,
- &wr_object->u.response);
- if (shdr_csf)
- shdr_change_status =
- LPFC_CHANGE_STATUS_PCI_RESET;
- }
-
- switch (shdr_change_status) {
- case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3198 Firmware write complete: System "
- "reboot required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_FW_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3199 Firmware write complete: Firmware"
- " reset required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3200 Firmware write complete: Port "
- "Migration or PCI Reset required to "
- "instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PCI_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3201 Firmware write complete: PCI "
- "Reset required to instantiate\n");
- break;
- default:
- break;
- }
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
}
+
if (!phba->sli4_hba.intr_enable)
mempool_free(mbox, phba->mbox_mem_pool);
else if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
- if (shdr_status || shdr_add_status || rc) {
+ if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3025 Write Object mailbox failed with "
- "status x%x add_status x%x, mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
+ "status x%x add_status x%x, add_status_2 x%x, "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ rc);
rc = -ENXIO;
*offset = shdr_add_status;
- } else
+ } else {
*offset += wr_object->u.response.actual_write_length;
+ }
+
+ if (rc || check_change_status)
+ lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+ shdr_add_status_2, shdr_change_status,
+ shdr_csf);
return rc;
}
@@ -20543,8 +21290,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & LPFC_IO_NVME ||
- pwqe->iocb_flag & LPFC_IO_FCP) {
+ if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -21323,6 +22069,116 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
}
/**
+ * lpfc_read_object - Retrieve object data from HBA
+ * @phba: The HBA for which this call is being executed.
+ * @rdobject: Pathname of object data we want to read.
+ * @datap: Pointer to where data will be copied to.
+ * @datasz: size of data area
+ *
+ * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
+ * The data will be truncated if datasz is not large enough.
+ * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
+ * Returns the actual bytes read from the object.
+ */
+int
+lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
+ uint32_t datasz)
+{
+ struct lpfc_mbx_read_object *read_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, eof, j, byte_cnt = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *pcmd;
+
+ /* sanity check on the caller's data pointer */
+ if (!datap)
+ return -ENODEV;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_read_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_READ_OBJECT,
+ length, LPFC_SLI4_MBX_EMBED);
+ read_object = &mbox->u.mqe.un.read_object;
+ shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
+
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
+ bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
+ read_object->u.request.rd_object_offset = 0;
+ read_object->u.request.rd_object_cnt = 1;
+
+ memset((void *)read_object->u.request.rd_object_name, 0,
+ LPFC_OBJ_NAME_SZ);
+ scnprintf((char *)read_object->u.request.rd_object_name,
+ LPFC_OBJ_NAME_SZ, "%s", rdobject);
+ for (j = 0; j < strlen(rdobject); j++)
+ read_object->u.request.rd_object_name[j] =
+ cpu_to_le32(read_object->u.request.rd_object_name[j]);
+
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
+ if (pcmd)
+ pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+ if (!pcmd || !pcmd->virt) {
+ kfree(pcmd);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
+ read_object->u.request.rd_object_hbuf[0].pa_lo =
+ putPaddrLow(pcmd->phys);
+ read_object->u.request.rd_object_hbuf[0].pa_hi =
+ putPaddrHigh(pcmd->phys);
+ read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->ctx_buf = NULL;
+ mbox->ctx_ndlp = NULL;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if (shdr_status == STATUS_FAILED &&
+ shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "4674 No port cfg file in FW.\n");
+ byte_cnt = -ENOENT;
+ } else if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "2625 READ_OBJECT mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ byte_cnt = -ENXIO;
+ } else {
+ /* Success */
+ length = read_object->u.response.rd_object_actual_rlen;
+ eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
+ "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
+ length, datasz, eof);
+
+ /* Detect the port config file exists but is empty */
+ if (!length && eof) {
+ byte_cnt = 0;
+ goto exit;
+ }
+
+ byte_cnt = length;
+ lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
+ }
+
+ exit:
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(pcmd);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return byte_cnt;
+}
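
A hedged caller sketch for the routine above; the object name is a placeholder (the real firmware object paths are defined elsewhere in the driver), and the fragment assumes a function scope with phba in hand:

/* Hypothetical usage: read a firmware object into a local buffer and
 * distinguish the three outcomes. "/some/object" is illustrative only.
 */
u32 buf[LPFC_BPL_SIZE / sizeof(u32)];
int len = lpfc_read_object(phba, "/some/object", buf, sizeof(buf));

if (len == -ENOENT) {
	/* object does not exist in FW; often not a hard error */
} else if (len < 0) {
	/* mailbox or allocation failure */
} else {
	/* len bytes (truncated to sizeof(buf) at most) are in buf */
}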
+
+/**
* lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
* @phba: The HBA for which this call is being executed.
* @lpfc_buf: IO buf structure to append the SGL chunk
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dde8eb9d796d..5161ccacea3e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -107,6 +107,7 @@ struct lpfc_iocbq {
#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
#define LPFC_IO_NVMET 0x800000 /* NVMET command */
#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */
+#define LPFC_IO_CMF 0x4000000 /* CMF command */
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
@@ -462,4 +463,5 @@ struct lpfc_io_buf {
uint64_t ts_isr_cmpl;
uint64_t ts_data_io;
#endif
+ uint64_t rx_cmd_start;
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 26f19c95380f..99c5d1e4da5e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -557,6 +557,7 @@ struct lpfc_pc_sli4_params {
uint16_t mi_value;
#define LPFC_DFLT_MIB_VAL 2
uint8_t mib_bde_cnt;
+ uint8_t cmf;
uint8_t cqv;
uint8_t mqv;
uint8_t wqv;
@@ -978,6 +979,8 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_nd_WORD conf_trunk
#define lpfc_conf_trunk_port3_nd_SHIFT 7
#define lpfc_conf_trunk_port3_nd_MASK 0x1
+ uint8_t flash_id;
+ uint8_t asic_rev;
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2d62fd2a9824..a7aba7833425 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.10"
+#define LPFC_DRIVER_VERSION "14.0.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ec10b2497310..e4298bf4a482 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1451,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* pthru timeout to the os layer timeout value.
*/
if (scp->device->type == TYPE_TAPE) {
- if ((scp->request->timeout / HZ) > 0xFFFF)
+ if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
pthru->timeout = cpu_to_le16(0xFFFF);
else
- pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 06399c026a8d..26d0cf9353dd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -402,7 +402,7 @@ megasas_get_msix_index(struct megasas_instance *instance,
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
} else if (instance->host->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
instance->low_latency_index_start;
@@ -3023,7 +3023,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
io_request->DevHandle = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
pRAID_Context->timeout_value =
- cpu_to_le16 (scmd->request->timeout / HZ);
+ cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -3086,7 +3086,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(scmd);
pd_index = MEGASAS_PD_INDEX(scmd);
- os_timeout_value = scmd->request->timeout / HZ;
+ os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ;
mr_device_priv_data = scmd->device->hostdata;
cmd->pd_interface = mr_device_priv_data->interface_type;
@@ -3381,7 +3381,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
return SCSI_MLQUEUE_HOST_BUSY;
}
- cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+ cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag);
if (!cmd) {
atomic_dec(&instance->fw_outstanding);
@@ -3422,7 +3422,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
*/
if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
r1_cmd = megasas_get_cmd_fusion(instance,
- (scmd->request->tag + instance->max_fw_cmds));
+ scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds);
megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 6f5dc9e78553..9787b53a2b59 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -183,6 +183,20 @@ enum mpi3mr_iocstate {
MRIOC_STATE_UNRECOVERABLE,
};
+/* Init type definitions */
+enum mpi3mr_init_type {
+ MPI3MR_IT_INIT = 0,
+ MPI3MR_IT_RESET,
+ MPI3MR_IT_RESUME,
+};
+
+/* Cleanup reason definitions */
+enum mpi3mr_cleanup_reason {
+ MPI3MR_COMPLETE_CLEANUP = 0,
+ MPI3MR_REINIT_FAILURE,
+ MPI3MR_SUSPEND,
+};
+
/* Reset reason code definitions*/
enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_BRINGUP = 1,
@@ -855,8 +869,8 @@ struct delayed_dev_rmhs_node {
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
u16 admin_req_sz, u8 ignore_reset);
@@ -872,6 +886,7 @@ void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma);
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 2dba2b0af166..4a8316c6bd41 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -3205,7 +3205,7 @@ out_failed:
/**
* mpi3mr_init_ioc - Initialize the controller
* @mrioc: Adapter instance reference
- * @re_init: Flag to indicate is this fresh init or re-init
+ * @init_type: Type of this initialization (fresh init, reset or resume)
*
* This the controller initialization routine, executed either
* after soft reset or from pci probe callback.
@@ -3218,7 +3218,7 @@ out_failed:
*
* Return: 0 on success and non-zero on failure.
*/
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
{
int retval = 0;
enum mpi3mr_iocstate ioc_state;
@@ -3229,7 +3229,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
mrioc->change_count = 0;
- if (!re_init) {
+ if (init_type == MPI3MR_IT_INIT) {
mrioc->cpu_count = num_online_cpus();
retval = mpi3mr_setup_resources(mrioc);
if (retval) {
@@ -3314,7 +3314,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
goto out_failed;
}
- if (!re_init) {
+ if (init_type != MPI3MR_IT_RESET) {
retval = mpi3mr_setup_isr(mrioc, 1);
if (retval) {
ioc_err(mrioc, "Failed to setup ISR error %d\n",
@@ -3332,7 +3332,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
}
mpi3mr_process_factsdata(mrioc, &facts_data);
- if (!re_init) {
+ if (init_type == MPI3MR_IT_INIT) {
retval = mpi3mr_check_reset_dma_mask(mrioc);
if (retval) {
ioc_err(mrioc, "Resetting dma mask failed %d\n",
@@ -3351,7 +3351,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
goto out_failed;
}
- if (!re_init) {
+ if (init_type == MPI3MR_IT_INIT) {
retval = mpi3mr_alloc_chain_bufs(mrioc);
if (retval) {
ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
@@ -3374,7 +3374,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
writel(mrioc->sbq_host_index,
&mrioc->sysif_regs->sense_buffer_free_host_index);
- if (!re_init) {
+ if (init_type != MPI3MR_IT_RESET) {
retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
@@ -3390,7 +3390,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
goto out_failed;
}
- if (re_init &&
+ if ((init_type != MPI3MR_IT_INIT) &&
(mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
retval = -1;
ioc_err(mrioc,
@@ -3422,7 +3422,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
goto out_failed;
}
- if (re_init) {
+ if (init_type != MPI3MR_IT_INIT) {
ioc_info(mrioc, "Issuing Port Enable\n");
retval = mpi3mr_issue_port_enable(mrioc, 0);
if (retval) {
@@ -3434,7 +3434,10 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
return retval;
out_failed:
- mpi3mr_cleanup_ioc(mrioc, re_init);
+ if (init_type == MPI3MR_IT_INIT)
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+ else
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
out_nocleanup:
return retval;
}
@@ -3495,7 +3498,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
*
* Return: Nothing.
*/
-static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
@@ -3710,7 +3713,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
/**
* mpi3mr_cleanup_ioc - Cleanup controller
* @mrioc: Adapter instance reference
- * @re_init: Cleanup due to a reinit or not
+ * @reason: Cleanup reason
*
* controller cleanup handler, Message unit reset or soft reset
* and shutdown notification is issued to the controller and the
@@ -3718,11 +3721,11 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
*
* Return: Nothing.
*/
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
{
enum mpi3mr_iocstate ioc_state;
- if (!re_init)
+ if (reason == MPI3MR_COMPLETE_CLEANUP)
mpi3mr_stop_watchdog(mrioc);
mpi3mr_ioc_disable_intr(mrioc);
@@ -3737,11 +3740,11 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
MPI3MR_RESET_FROM_MUR_FAILURE);
- if (!re_init)
+ if (reason != MPI3MR_REINIT_FAILURE)
mpi3mr_issue_ioc_shutdown(mrioc);
}
- if (!re_init) {
+ if (reason == MPI3MR_COMPLETE_CLEANUP) {
mpi3mr_free_mem(mrioc);
mpi3mr_cleanup_resources(mrioc);
}
@@ -3923,7 +3926,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_flush_host_io(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
mpi3mr_memset_buffers(mrioc);
- retval = mpi3mr_init_ioc(mrioc, 1);
+ retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
if (retval) {
pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
mrioc->name, reset_reason);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 24ac7ddec749..2197988333fe 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -50,7 +50,7 @@ static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
u32 unique_tag;
u16 host_tag, hw_queue;
- unique_tag = blk_mq_unique_tag(scmd->request);
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
if (hw_queue >= mrioc->num_op_reply_q)
@@ -1963,7 +1963,6 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
{
u16 eedp_flags = 0;
unsigned char prot_op = scsi_get_prot_op(scmd);
- unsigned char prot_type = scsi_get_prot_type(scmd);
switch (prot_op) {
case SCSI_PROT_NORMAL:
@@ -1983,60 +1982,42 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
break;
case SCSI_PROT_READ_PASS:
- eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
- MPI3_EEDPFLAGS_CHK_REF_TAG | MPI3_EEDPFLAGS_CHK_APP_TAG |
- MPI3_EEDPFLAGS_CHK_GUARD;
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
break;
case SCSI_PROT_WRITE_PASS:
- if (scsi_host_get_guard(scmd->device->host)
- & SHOST_DIX_GUARD_IP) {
- eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN |
- MPI3_EEDPFLAGS_CHK_APP_TAG |
- MPI3_EEDPFLAGS_CHK_GUARD |
- MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
scsiio_req->sgl[0].eedp.application_tag_translation_mask =
0xffff;
- } else {
- eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
- MPI3_EEDPFLAGS_CHK_REF_TAG |
- MPI3_EEDPFLAGS_CHK_APP_TAG |
- MPI3_EEDPFLAGS_CHK_GUARD;
- }
+ } else
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+
scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
break;
default:
return;
}
- if (scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP)
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
+
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
- switch (prot_type) {
- case SCSI_PROT_DIF_TYPE0:
- eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
+ MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
scsiio_req->cdb.eedp32.primary_reference_tag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
- break;
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
- eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
- MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
- MPI3_EEDPFLAGS_CHK_GUARD;
- scsiio_req->cdb.eedp32.primary_reference_tag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
- break;
- case SCSI_PROT_DIF_TYPE3:
- eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
- MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
- break;
-
- default:
- scsiio_req->msg_flags &= ~(MPI3_SCSIIO_MSGFLAGS_METASGL_VALID);
- return;
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
}
- switch (scmd->device->sector_size) {
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+
+ eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+
+ switch (scsi_prot_interval(scmd)) {
case 512:
scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
break;
@@ -3451,7 +3432,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
u16 dev_handle;
u16 host_tag;
u32 scsiio_flags = 0;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
sdev_priv_data = scmd->device->hostdata;
@@ -3795,7 +3776,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
mrioc->is_driver_loading = 1;
- if (mpi3mr_init_ioc(mrioc, 0)) {
+ if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
retval = -ENODEV;
@@ -3818,7 +3799,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return retval;
addhost_failed:
- mpi3mr_cleanup_ioc(mrioc, 0);
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
out_iocinit_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
out_fwevtthread_failed:
@@ -3870,7 +3851,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
}
- mpi3mr_cleanup_ioc(mrioc, 0);
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
@@ -3910,7 +3891,7 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- mpi3mr_cleanup_ioc(mrioc, 0);
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
}
#ifdef CONFIG_PM
@@ -3940,7 +3921,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_cleanup_fwevt_list(mrioc);
scsi_block_requests(shost);
mpi3mr_stop_watchdog(mrioc);
- mpi3mr_cleanup_ioc(mrioc, 1);
+ mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);
device_state = pci_choose_state(pdev, state);
ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
@@ -3988,7 +3969,8 @@ static int mpi3mr_resume(struct pci_dev *pdev)
}
mrioc->stop_drv_processing = 0;
- mpi3mr_init_ioc(mrioc, 1);
+ mpi3mr_memset_buffers(mrioc);
+ mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
scsi_unblock_requests(shost);
mpi3mr_start_watchdog(mrioc);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index cf4a3a2c22ad..6c82435bc9cc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode,
"\t\tdefault - default perf_mode is 'balanced'"
);
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+	"This parameter is effective only if host_tagset_enable=1.\n\t\t"
+	"When poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
+	);
+
enum mpt3sas_perf_mode {
MPT_PERF_MODE_DEFAULT = -1,
MPT_PERF_MODE_BALANCED = 0,
@@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work)
* and this call is safe since dead ioc will never return any
* command back from HW.
*/
+ mpt3sas_base_pause_mq_polling(ioc);
ioc->schedule_dead_ioc_flush_running_cmds(ioc);
/*
* Set remove_host flag early since kernel thread will
@@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_base_clear_outstanding_commands(ioc);
}
@@ -1548,6 +1558,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
+ * while the driver is flushing out IOs.
+ * @ioc: per adapter object
+ *
+ * Pause polling on the mq poll (io_uring) queues while the driver is flushing
+ * out IOs. Otherwise the same IO may race to completion from two paths.
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
+
+ /*
+ * wait for current poll to complete.
+ */
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+ udelay(500);
+ }
+}
+
+/**
+ * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
+}
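
The pause/busy pair above forms a small handshake: a poller only runs while pause is clear and it holds the busy count; the flush path sets pause and waits for busy to drain. A self-contained userspace model of that handshake, with our own names:

#include <stdatomic.h>
#include <stdbool.h>

struct poll_queue {
	atomic_int pause;
	atomic_int busy;
};

static bool try_poll(struct poll_queue *q)
{
	int expected = 0;

	if (atomic_load(&q->pause))
		return false;
	/* analogue of atomic_add_unless(&busy, 1, 1): enter only if idle */
	if (!atomic_compare_exchange_strong(&q->busy, &expected, 1))
		return false;
	/* ... process completions here ... */
	atomic_fetch_sub(&q->busy, 1);
	return true;
}

static void pause_polling(struct poll_queue *q)
{
	atomic_store(&q->pause, 1);
	while (atomic_load(&q->busy))
		; /* spin until any in-flight poll pass finishes */
}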
+
+/**
* mpt3sas_base_mask_interrupts - disable interrupts
* @ioc: per adapter object
*
@@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
}
- if (!reply_q->irq_poll_scheduled) {
+ if (!reply_q->is_iouring_poll_q &&
+ !reply_q->irq_poll_scheduled) {
reply_q->irq_poll_scheduled = true;
irq_poll_sched(&reply_q->irqpoll);
}
@@ -1779,6 +1837,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
}
/**
+ * mpt3sas_blk_mq_poll - poll the blk mq poll queue
+ * @shost: Scsi_Host object
+ * @queue_num: hw ctx queue number
+ *
+ * Return number of entries that have been processed from the poll queue.
+ */
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ (struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct adapter_reply_queue *reply_q;
+ int num_entries = 0;
+ int qid = queue_num - ioc->iopoll_q_start_index;
+
+ if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
+ !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
+ return 0;
+
+ reply_q = ioc->io_uring_poll_queues[qid].reply_q;
+
+ num_entries = _base_process_reply_queue(reply_q);
+ atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
+
+ return num_entries;
+}
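
For this handler to be invoked, the SCSI host template must expose it through the mq_poll hook; the hookup itself is outside this hunk, so the wiring below is a sketch under that assumption:

/* Assumed wiring, not shown in this hunk: block layer polling
 * reaches the driver through the host template's mq_poll op.
 */
static struct scsi_host_template mpt3sas_driver_template = {
	/* ... */
	.mq_poll	= mpt3sas_blk_mq_poll,
};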
+
+/**
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
@@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
return;
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ if (reply_q->is_iouring_poll_q)
+ continue;
irq_poll_init(&reply_q->irqpoll,
ioc->hba_queue_depth/4, _base_irqpoll);
reply_q->irq_poll_scheduled = false;
@@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+
+ if (reply_q->is_iouring_poll_q) {
+ _base_process_reply_queue(reply_q);
+ continue;
+ }
+
synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
if (reply_q->irq_poll_scheduled) {
/* Calling irq_poll_disable will wait for any pending
@@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
+ if (reply_q->is_iouring_poll_q) {
+ kfree(reply_q);
+ continue;
+ }
+
if (ioc->smp_affinity_enable)
irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
reply_q->msix_index), NULL);
@@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
struct pci_dev *pdev = ioc->pdev;
struct adapter_reply_queue *reply_q;
- int r;
+ int r, qid;
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
@@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q->msix_index = index;
atomic_set(&reply_q->busy, 0);
+
+ if (index >= ioc->iopoll_q_start_index) {
+ qid = index - ioc->iopoll_q_start_index;
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+ ioc->driver_name, ioc->id, qid);
+ reply_q->is_iouring_poll_q = 1;
+ ioc->io_uring_poll_queues[qid].reply_q = reply_q;
+ goto out;
+ }
+
+
if (ioc->msix_enable)
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
ioc->driver_name, ioc->id, index);
@@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
kfree(reply_q);
return -EBUSY;
}
-
+out:
INIT_LIST_HEAD(&reply_q->list);
list_add_tail(&reply_q->list, &ioc->reply_queue_list);
return 0;
@@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
unsigned int cpu, nr_cpus, nr_msix, index = 0;
struct adapter_reply_queue *reply_q;
int local_numa_node;
+ int iopoll_q_count = ioc->reply_queue_count -
+ ioc->iopoll_q_start_index;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
const cpumask_t *mask;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
mask = pci_irq_get_affinity(ioc->pdev,
@@ -3121,13 +3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
fall_back:
cpu = cpumask_first(cpu_online_mask);
- nr_msix -= ioc->high_iops_queues;
+ nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
index = 0;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
unsigned int i, group = nr_cpus / nr_msix;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
if (cpu >= nr_cpus)
@@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
{
u16 lnksta, speed;
+ /*
+ * Disable high iops queues if io uring poll queues are enabled.
+ */
if (perf_mode == MPT_PERF_MODE_IOPS ||
- perf_mode == MPT_PERF_MODE_LATENCY) {
+ perf_mode == MPT_PERF_MODE_LATENCY ||
+ ioc->io_uring_poll_queues) {
ioc->high_iops_queues = 0;
return;
}
@@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
return;
pci_free_irq_vectors(ioc->pdev);
ioc->msix_enable = 0;
+ kfree(ioc->io_uring_poll_queues);
}
/**
@@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
int i, irq_flags = PCI_IRQ_MSIX;
struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
struct irq_affinity *descp = &desc;
+ /*
+ * Don't allocate MSI-X vectors for poll_queues; the vector count
+ * always stays within the range of FW-supported reply queues.
+ */
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+
if (ioc->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
else
descp = NULL;
- ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->reply_queue_count);
+ ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
+ ioc->reply_queue_count, nr_msix_vectors);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->reply_queue_count, irq_flags, descp);
+ nr_msix_vectors, irq_flags, descp);
return i;
}
@@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
+ int iopoll_q_count = 0;
ioc->msix_load_balance = false;
@@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
ioc->cpu_count, max_msix_vectors);
- if (ioc->is_aero_ioc)
- _base_check_and_enable_high_iops_queues(ioc,
- ioc->msix_vector_count);
+
ioc->reply_queue_count =
- min_t(int, ioc->cpu_count + ioc->high_iops_queues,
- ioc->msix_vector_count);
+ min_t(int, ioc->cpu_count, ioc->msix_vector_count);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
local_max_msix_vectors = max_msix_vectors;
- if (local_max_msix_vectors > 0)
- ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
- ioc->reply_queue_count);
- else if (local_max_msix_vectors == 0)
+ if (local_max_msix_vectors == 0)
goto try_ioapic;
/*
@@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
if (ioc->msix_load_balance)
ioc->smp_affinity_enable = 0;
+ if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
+ ioc->shost->host_tagset = 0;
+
+ /*
+ * Enable io uring poll queues only if host_tagset is enabled.
+ */
+ if (ioc->shost->host_tagset)
+ iopoll_q_count = poll_queues;
+
+ if (iopoll_q_count) {
+ ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
+ sizeof(struct io_uring_poll_queue), GFP_KERNEL);
+ if (!ioc->io_uring_poll_queues)
+ iopoll_q_count = 0;
+ }
+
+ if (ioc->is_aero_ioc)
+ _base_check_and_enable_high_iops_queues(ioc,
+ ioc->msix_vector_count);
+
+ /*
+ * Add high iops queues count to reply queue count if high iops queues
+ * are enabled.
+ */
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + ioc->high_iops_queues,
+ ioc->msix_vector_count);
+
+ /*
+ * Adjust the reply queue count in case it exceeds the
+ * user-provided MSI-X vector count.
+ */
+ if (local_max_msix_vectors > 0)
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
+ ioc->reply_queue_count);
+ /*
+ * Add io uring poll queues count to reply queues count
+ * if io uring is enabled in driver.
+ */
+ if (iopoll_q_count) {
+ if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
+ iopoll_q_count = 0;
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + iopoll_q_count,
+ ioc->msix_vector_count);
+ }
+
+ /*
+ * Starting index of io uring poll queues in reply queue list.
+ */
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
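+
+	/*
+	 * Worked example with illustrative numbers: 24 CPUs, 128 MSI-X
+	 * vectors, high_iops_queues = 8, poll_queues = 4, host_tagset on:
+	 * reply_queue_count = min(24, 128) = 24, +8 high-iops = 32,
+	 * +4 poll queues = 36, so iopoll_q_start_index = 36 - 4 = 32 and
+	 * reply queues 32..35 poll without an MSI-X vector.
+	 */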
+
r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
goto try_ioapic;
}
+ /*
+ * Adjust the reply queue count if fewer MSI-X vectors
+ * were allocated than requested.
+ */
+ if (r < ioc->iopoll_q_start_index) {
+ ioc->reply_queue_count = r + iopoll_q_count;
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
+ }
+
ioc->msix_enable = 1;
- ioc->reply_queue_count = r;
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
@@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->high_iops_queues = 0;
ioc_info(ioc, "High IOPs queues : disabled\n");
ioc->reply_queue_count = 1;
+ ioc->iopoll_q_start_index = ioc->reply_queue_count; /* no poll queues in legacy mode */
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
dfailprintk(ioc,
@@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
u64 pio_chip = 0;
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
+ int iopoll_q_count = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_fail;
+ iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ for (i = 0; i < iopoll_q_count; i++) {
+ atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
+ atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
+ }
+
if (!ioc->is_driver_loading)
_base_init_irqpolls(ioc);
/* Use the Combined reply queue feature only for SAS3 C0 & higher
@@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* 4)));
}
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
+ pr_info("%s: enabled: index: %d\n",
+ reply_q->name, reply_q->msix_index);
+ continue;
+ }
+
pr_info("%s: %s enabled: IRQ %d\n",
reply_q->name,
ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ }
ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
&chip_phys, ioc->chip, memap_sz);
@@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
&ioc->total_io_cnt), ioc->reply_queue_count) : 0;
if (scmd && ioc->shost->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
return blk_mq_unique_tag_to_hwq(tag) +
ioc->high_iops_queues;
@@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 smid;
u32 tag, unique_tag;
- unique_tag = blk_mq_unique_tag(scmd->request);
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
tag = blk_mq_unique_tag_to_tag(unique_tag);
/*
@@ -5169,6 +5366,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_assign_fw_reported_qd - Get the FW-reported queue depths for
+ * SAS/SATA and NVMe devices; fall back to default QD values on failure.
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ int sz;
+ int rc = 0;
+
+ ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
+ ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
+ if (!ioc->is_gen35_ioc)
+ goto out;
+ /* sas iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return rc;
+ }
+ rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz);
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->max_wideport_qd =
+ (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
+ le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
+ MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_narrowport_qd =
+ (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
+ le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
+ MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
+ sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+ /* pcie iounit page 1 */
+ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
+ &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
+ (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
+ MPT3SAS_NVME_QUEUE_DEPTH;
+out:
+ dinitprintk(ioc, pr_err(
+ "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
+ ioc->max_wideport_qd, ioc->max_narrowport_qd,
+ ioc->max_sata_qd, ioc->max_nvme_qd));
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc_warn(ioc,
"TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
}
+ rc = _base_assign_fw_reported_qd(ioc);
+ if (rc)
+ return rc;
rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
if (rc)
return rc;
@@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
r = mpt3sas_base_make_ioc_ready(ioc, type);
if (r)
goto out;
@@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
mutex_unlock(&ioc->reset_in_progress_mutex);
+ mpt3sas_base_resume_mq_polling(ioc);
out_unlocked:
if ((r == 0) && is_trigger) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 0c6c3df0038d..f87c0911f66a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,9 +77,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "37.101.00.00"
-#define MPT3SAS_MAJOR_VERSION 37
-#define MPT3SAS_MINOR_VERSION 101
+#define MPT3SAS_DRIVER_VERSION "39.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 39
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_MIN_IRQS 1
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -575,6 +576,7 @@ struct _sas_device {
u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
+ u8 port_type;
struct hba_port *port;
struct sas_rphy *rphy;
};
@@ -936,6 +938,8 @@ struct _event_ack_list {
* @os_irq: irq number
* @irqpoll: irq_poll object
* @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @is_iouring_poll_q: Tells whether this reply queue is assigned
+ * as an io_uring poll queue or not
* @list: this list
*/
struct adapter_reply_queue {
@@ -949,9 +953,22 @@ struct adapter_reply_queue {
struct irq_poll irqpoll;
bool irq_poll_scheduled;
bool irq_line_enable;
+ bool is_iouring_poll_q;
struct list_head list;
};
+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on io uring poll queue or not
+ * @reply_q: reply queue mapped for io uring poll queue
+ */
+struct io_uring_poll_queue {
+ atomic_t busy;
+ atomic_t pause;
+ struct adapter_reply_queue *reply_q;
+};
+
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
/* SAS3.0 support */
@@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ * in reply queue list
* @drv_internal_flags: Bit map internal to driver
* @drv_support_bitmap: driver's supported feature bit map
* @use_32bit_dma: Flag to use 32 bit consistent dma mask
@@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER {
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u8 iopoll_q_start_index;
u32 drv_internal_flags;
u32 drv_support_bitmap;
u32 dma_mask;
bool enable_sdev_max_qd;
bool use_32bit_dma;
+ struct io_uring_poll_queue *io_uring_poll_queues;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER {
u8 tm_custom_handling;
u8 nvme_abort_timeout;
u16 max_shutdown_latency;
+ u16 max_wideport_qd;
+ u16 max_narrowport_qd;
+ u16 max_nvme_qd;
+ u8 max_sata_qd;
/* static config pages */
struct mpt3sas_facts facts;
@@ -1730,10 +1755,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
status, mpi_request, sz); } while (0)
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
-int
-mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
u32 form, u32 handle);
+int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz);
int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
u16 sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 83a5c2172ad4..0563078227de 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -1169,6 +1169,43 @@ out:
}
/**
+ * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT;
+ mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+out:
+ return r;
+}
+
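The new reader follows the driver's usual two-step config transaction: the first _config_request() with MPI2_CONFIG_ACTION_PAGE_HEADER validates the page and fills in the header, and only then is the action switched to MPI2_CONFIG_ACTION_PAGE_READ_CURRENT to pull the payload into the caller's buffer. As a sketch of how a consumer might use it: the helper name below is invented and the NVMeMaxQueueDepth field name is an assumption based on the MPI 2.6 page layout, so this compiles only in-tree and is illustrative, not part of this patch.

/* Hypothetical caller (not from this patch): fetch PCIe IO Unit
 * Page 1 and adopt the firmware-reported NVMe queue depth cap. */
static void example_read_nvme_qd(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26PCIeIOUnitPage1_t pg1;
	Mpi2ConfigReply_t reply;

	memset(&pg1, 0, sizeof(pg1));
	if (mpt3sas_config_get_pcie_iounit_pg1(ioc, &reply, &pg1,
					       sizeof(pg1)))
		return;		/* keep the driver default on failure */
	if (pg1.NVMeMaxQueueDepth)	/* assumed MPI 2.6 field name */
		ioc->max_nvme_qd = le16_to_cpu(pg1.NVMeMaxQueueDepth);
}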
+/**
* mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b66140e4c370..770b241d7bb2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev,
}
} else if (sas_target_priv_data->flags &
MPT_TARGET_FLAGS_PCIE_DEVICE)
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
else
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
mpt3sas_scsih_change_queue_depth(sdev, qdepth);
}
@@ -3919,6 +3920,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(sas_device_handle);
/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute; it only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
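Since the attribute is read-only, checking it from userspace is a one-line read. A small illustrative program follows; the sysfs path is an example only and depends on how the disk was enumerated on a given system.

#include <stdio.h>

/*
 * Illustrative userspace check of the new attribute; the sysfs path
 * is an assumption and varies per system.
 */
int main(void)
{
	FILE *f = fopen("/sys/block/sda/device/sas_ncq_prio_supported", "r");
	int supported = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &supported) == 1)
		printf("NCQ priority %ssupported\n", supported ? "" : "not ");
	fclose(f);
	return 0;
}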
+/**
* sas_ncq_prio_enable_show - send prioritized io commands to device
* @dev: pointer to embedded device
* @attr: sas_ncq_prio_enable attribute descriptor
@@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable);
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_supported,
&dev_attr_sas_ncq_prio_enable,
NULL,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8e64a6f14542..2f82b1e629af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* limit max device queue for SATA to 32 if enable_sdev_max_qd
* is disabled.
*/
- if (ioc->enable_sdev_max_qd)
+ if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
goto not_sata;
sas_device_priv_data = sdev->hostdata;
@@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev)
return 1;
}
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
ds = "NVMe";
sdev_printk(KERN_INFO, sdev,
"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
@@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device->volume_handle = volume_handle;
sas_device->volume_wwid = volume_wwid;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
@@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev)
} else
ds = "SSP";
} else {
- qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ qdepth = ioc->max_sata_qd;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
ds = "STP";
else if (sas_device->device_info &
@@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
"scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
- (scmd->request->timeout / HZ) * 1000);
+ (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
@@ -5047,48 +5048,31 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
Mpi25SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
- unsigned char prot_op = scsi_get_prot_op(scmd);
- unsigned char prot_type = scsi_get_prot_type(scmd);
Mpi25SCSIIORequest_t *mpi_request_3v =
(Mpi25SCSIIORequest_t *)mpi_request;
- if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
- return;
-
- if (prot_op == SCSI_PROT_READ_STRIP)
+ switch (scsi_get_prot_op(scmd)) {
+ case SCSI_PROT_READ_STRIP:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
- else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ break;
+ case SCSI_PROT_WRITE_INSERT:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
- else
+ break;
+ default:
return;
+ }
- switch (prot_type) {
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- /*
- * enable ref/guard checking
- * auto increment ref tag
- */
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
- break;
-
- case SCSI_PROT_DIF_TYPE3:
-
- /*
- * enable guard checking
- */
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-
- break;
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
}
- mpi_request_3v->EEDPBlockSize =
- cpu_to_le16(scmd->device->sector_size);
+ mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
if (ioc->is_gen35_ioc)
eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
@@ -5141,7 +5125,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int class;
Mpi25SCSIIORequest_t *mpi_request;
struct _pcie_device *pcie_device = NULL;
@@ -7371,6 +7355,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+ sas_device->port_type = sas_device_pg0.MaxPortConnections;
+ ioc_info(ioc,
+ "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+ handle, sas_device->sas_address, sas_device->port_type);
if (ioc->wait_for_discovery_to_complete)
_scsih_sas_device_init_add(ioc, sas_device);
@@ -9604,6 +9592,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_update_device_qdepth - Update device queue depth during reset.
+ * @ioc: per adapter object
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ sas_device = sas_device_priv_data->sas_target->sas_dev;
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+ qdepth = ioc->max_nvme_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ qdepth = ioc->max_sata_qd;
+ else
+ continue;
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ }
+ }
+}
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_device_pg0: SAS Device page 0
@@ -10654,6 +10678,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_remove_unresponding_devices(ioc);
_scsih_del_dirty_vphy(ioc);
_scsih_del_dirty_port_entries(ioc);
+ if (ioc->is_gen35_ioc)
+ _scsih_update_device_qdepth(ioc);
_scsih_scan_for_devices_after_reset(ioc);
/*
* If diag reset has occurred during the driver load
@@ -11178,8 +11204,10 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -11274,8 +11302,10 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -11785,12 +11815,41 @@ static int scsih_map_queues(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct blk_mq_queue_map *map;
+ int i, qoff, offset;
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+ int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
- if (ioc->shost->nr_hw_queues == 1)
+ if (shost->nr_hw_queues == 1)
return 0;
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ioc->pdev, ioc->high_iops_queues);
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ map = &shost->tag_set.map[i];
+ map->nr_queues = 0;
+ offset = 0;
+ if (i == HCTX_TYPE_DEFAULT) {
+ map->nr_queues =
+ nr_msix_vectors - ioc->high_iops_queues;
+ offset = ioc->high_iops_queues;
+ } else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = iopoll_q_count;
+
+ if (!map->nr_queues)
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq CPU mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+ return 0;
}
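Concretely, the split works out as follows. Suppose the adapter came up with 24 reply queues, 8 of them reserved as high-IOPS queues, and the io_uring poll queues starting at index 16: the DEFAULT map then covers the 8 interrupt-driven queues after the high-IOPS block, the POLL map covers the last 8, and the READ map is left with zero queues, which blk-mq treats as a fallback to DEFAULT. A standalone arithmetic check of that layout; the counts are illustrative, not fixed by the driver.

#include <stdio.h>

/*
 * Worked example of the map split in scsih_map_queues() with assumed
 * counts: 24 reply queues, 8 high-IOPS queues, io_uring poll queues
 * starting at index 16.
 */
int main(void)
{
	int reply_queue_count = 24;
	int high_iops_queues = 8;
	int iopoll_q_start_index = 16;

	int nr_msix_vectors = iopoll_q_start_index;
	int iopoll_q_count = reply_queue_count - nr_msix_vectors;

	/* DEFAULT map: interrupt-driven queues after the high-IOPS block */
	int default_nr = nr_msix_vectors - high_iops_queues;
	int default_off = high_iops_queues;

	printf("DEFAULT: %d queues at MSI-X offset %d\n",
	       default_nr, default_off);
	printf("POLL:    %d queues (regular blk-mq cpu mapping)\n",
	       iopoll_q_count);
	return 0;
}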
/* shost template for SAS 2.0 HBA devices */
@@ -11861,6 +11920,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
.map_queues = scsih_map_queues,
+ .mq_poll = mpt3sas_blk_mq_poll,
};
/* raid transport support for SAS 3.0 HBA devices */
@@ -11957,6 +12017,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost = NULL;
int rv;
u16 hba_mpi_version;
+ int iopoll_q_count = 0;
/* Determine in which MPI version class this pci device belongs */
hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
@@ -12204,6 +12265,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_thread_fail;
}
+ shost->host_tagset = 0;
+
+ if (ioc->is_gen35_ioc && host_tagset_enable)
+ shost->host_tagset = 1;
+
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -12226,16 +12292,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else
ioc->hide_drives = 0;
- shost->host_tagset = 0;
shost->nr_hw_queues = 1;
- if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
- host_tagset_enable && ioc->smp_affinity_enable) {
-
- shost->host_tagset = 1;
+ if (shost->host_tagset) {
shost->nr_hw_queues =
ioc->reply_queue_count - ioc->high_iops_queues;
+ iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+ shost->nr_maps = iopoll_q_count ? 3 : 1;
+
dev_info(&ioc->pdev->dev,
"Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
shost->can_queue, shost->nr_hw_queues);
@@ -12359,6 +12426,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* Permanent error, prepare for device removal */
ioc->pci_error_recovery = 1;
mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
return PCI_ERS_RESULT_DISCONNECT;
}
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 6bb03d7a254d..4d251bf630a3 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
- scmd->request->tag, scmd->cmnd[0], scmd->retries);
+ scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
return mhba->instancet->reset_host(mhba);
}
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 542ed88ef90d..a4a88323e020 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1263,6 +1263,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd)
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrb_hba *cb = shost_priv(shost);
struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1286,7 +1287,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
}
mbox->type3.opcode = MYRB_CMD_DCDB;
- mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.id = rq->tag + 3;
mbox->type3.addr = dcdb_addr;
dcdb->channel = sdev->channel;
dcdb->target = sdev->id;
@@ -1305,11 +1306,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
break;
}
dcdb->early_status = false;
- if (scmd->request->timeout <= 10)
+ if (rq->timeout <= 10)
dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
- else if (scmd->request->timeout <= 60)
+ else if (rq->timeout <= 60)
dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
- else if (scmd->request->timeout <= 600)
+ else if (rq->timeout <= 600)
dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
else
dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
@@ -1550,7 +1551,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
}
myrb_reset_cmd(cmd_blk);
- mbox->type5.id = scmd->request->tag + 3;
+ mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
if (scmd->sc_data_direction == DMA_NONE)
goto submit;
nsge = scsi_dma_map(scmd);
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 26326af23dbc..07f274afd7e5 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1582,6 +1582,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
static int myrs_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrs_hba *cs = shost_priv(shost);
struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1628,7 +1629,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
return SCSI_MLQUEUE_HOST_BUSY;
cmd_blk->sense_addr = sense_addr;
- timeout = scmd->request->timeout;
+ timeout = rq->timeout;
if (scmd->cmd_len <= 10) {
if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
struct myrs_ldev_info *ldev_info = sdev->hostdata;
@@ -1644,10 +1645,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_10.pdev.target = sdev->id;
mbox->SCSI_10.pdev.channel = sdev->channel;
}
- mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.id = rq->tag + 3;
mbox->SCSI_10.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_10.control.fua = true;
mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
@@ -1690,10 +1691,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_255.pdev.target = sdev->id;
mbox->SCSI_255.pdev.channel = sdev->channel;
}
- mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.id = rq->tag + 3;
mbox->SCSI_255.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_255.control.fua = true;
mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c76e9f05d042..7a4f5d4dd670 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1453,11 +1453,6 @@ struct head {
#define xerr_status phys.xerr_st
#define nego_status phys.nego_st
-#if 0
-#define sync_status phys.sync_st
-#define wide_status phys.wide_st
-#endif
-
/*==========================================================
**
** Declaration of structs: Data structure block
@@ -1980,9 +1975,6 @@ static inline char *ncr_name (struct ncb *np)
#define RELOC_SOFTC 0x40000000
#define RELOC_LABEL 0x50000000
#define RELOC_REGISTER 0x60000000
-#if 0
-#define RELOC_KVAR 0x70000000
-#endif
#define RELOC_LABELH 0x80000000
#define RELOC_MASK 0xf0000000
@@ -1991,21 +1983,7 @@ static inline char *ncr_name (struct ncb *np)
#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
#define RADDR(label) (RELOC_REGISTER | REG(label))
#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
-#if 0
-#define KVAR(which) (RELOC_KVAR | (which))
-#endif
-#if 0
-#define SCRIPT_KVAR_JIFFIES (0)
-#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
-#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
-/*
- * Kernel variables referenced in the scripts.
- * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
- */
-static void *script_kvars[] __initdata =
- { (void *)&jiffies };
-#endif
static struct script script0 __initdata = {
/*--------------------------< START >-----------------------*/ {
@@ -2162,11 +2140,6 @@ static struct script script0 __initdata = {
SCR_COPY (1),
RADDR (scratcha),
NADDR (msgout),
-#if 0
- SCR_COPY (1),
- RADDR (scratcha),
- NADDR (msgin),
-#endif
/*
** Anticipate the COMMAND phase.
** This is the normal case for initial selection.
@@ -4164,8 +4137,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->request->timeout >= HZ) {
- u_long tlimit = jiffies + cmd->request->timeout - HZ;
+ if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) {
+ u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
@@ -4378,10 +4351,6 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
cp->parity_status = 0;
cp->xerr_status = XE_OK;
-#if 0
- cp->sync_status = tp->sval;
- cp->wide_status = tp->wval;
-#endif
/*----------------------------------------------------
**
@@ -4553,12 +4522,8 @@ static void ncr_start_reset(struct ncb *np)
**
**==========================================================
*/
-static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
+static int ncr_reset_bus (struct ncb *np)
{
-/* struct scsi_device *device = cmd->device; */
- struct ccb *cp;
- int found;
-
/*
* Return immediately if reset is in progress.
*/
@@ -4573,24 +4538,6 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
*/
ncr_start_reset(np);
/*
- * First, look in the wakeup list
- */
- for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
- /*
- ** look for the ccb of this command.
- */
- if (cp->host_status == HS_IDLE) continue;
- if (cp->cmd == cmd) {
- found = 1;
- break;
- }
- }
-/*
- * Then, look in the waiting list
- */
- if (!found && retrieve_from_waiting_list(0, np, cmd))
- found = 1;
-/*
* Wake-up all awaiting commands with DID_RESET.
*/
reset_waiting_list(np);
@@ -4598,103 +4545,10 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* Wake-up all pending commands with HS_RESET -> DID_RESET.
*/
ncr_wakeup(np, HS_RESET);
-/*
- * If the involved command was not in a driver queue, and the
- * scsi driver told us reset is synchronous, and the command is not
- * currently in the waiting list, complete it with DID_RESET status,
- * in order to keep it alive.
- */
- if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- set_host_byte(cmd, DID_RESET);
- ncr_queue_done_cmd(np, cmd);
- }
return SUCCESS;
}
-#if 0 /* unused and broken.. */
-/*==========================================================
-**
-**
-** Abort an SCSI command.
-** This is called from the generic SCSI driver.
-**
-**
-**==========================================================
-*/
-static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
-{
-/* struct scsi_device *device = cmd->device; */
- struct ccb *cp;
- int found;
- int retv;
-
-/*
- * First, look for the scsi command in the waiting list
- */
- if (remove_from_waiting_list(np, cmd)) {
- set_host_byte(cmd, DID_ABORT);
- ncr_queue_done_cmd(np, cmd);
- return SCSI_ABORT_SUCCESS;
- }
-
-/*
- * Then, look in the wakeup list
- */
- for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
- /*
- ** look for the ccb of this command.
- */
- if (cp->host_status == HS_IDLE) continue;
- if (cp->cmd == cmd) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- return SCSI_ABORT_NOT_RUNNING;
- }
-
- if (np->settle_time) {
- return SCSI_ABORT_SNOOZE;
- }
-
- /*
- ** If the CCB is active, patch schedule jumps for the
- ** script to abort the command.
- */
-
- switch(cp->host_status) {
- case HS_BUSY:
- case HS_NEGOTIATE:
- printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
- cp->start.schedule.l_paddr =
- cpu_to_scr(NCB_SCRIPTH_PHYS (np, cancel));
- retv = SCSI_ABORT_PENDING;
- break;
- case HS_DISCONNECT:
- cp->restart.schedule.l_paddr =
- cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
- retv = SCSI_ABORT_PENDING;
- break;
- default:
- retv = SCSI_ABORT_NOT_RUNNING;
- break;
-
- }
-
- /*
- ** If there are no requests, the script
- ** processor will sleep on SEL_WAIT_RESEL.
- ** Let's wake it up, since it may have to work.
- */
- OUTB (nc_istat, SIGP);
-
- return retv;
-}
-#endif
-
static void ncr_detach(struct ncb *np)
{
struct ccb *cp;
@@ -5453,27 +5307,6 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
*/
fak = (kpc - 1) / div_10M[div] + 1;
-#if 0 /* This optimization does not seem very useful */
-
- per = (fak * div_10M[div]) / clk;
-
- /*
- ** Why not to try the immediate lower divisor and to choose
- ** the one that allows the fastest output speed ?
- ** We don't want input speed too much greater than output speed.
- */
- if (div >= 1 && fak < 8) {
- u_long fak2, per2;
- fak2 = (kpc - 1) / div_10M[div-1] + 1;
- per2 = (fak2 * div_10M[div-1]) / clk;
- if (per2 < per && fak2 <= 8) {
- fak = fak2;
- per = per2;
- --div;
- }
- }
-#endif
-
if (fak < 4) fak = 4; /* Should never happen, too bad ... */
/*
@@ -5511,10 +5344,6 @@ static void ncr_set_sync_wide_status (struct ncb *np, u_char target)
for (cp = np->ccb; cp; cp = cp->link_ccb) {
if (!cp->cmd) continue;
if (scmd_id(cp->cmd) != target) continue;
-#if 0
- cp->sync_status = tp->sval;
- cp->wide_status = tp->wval;
-#endif
cp->phys.select.sel_scntl3 = tp->wval;
cp->phys.select.sel_sxfer = tp->sval;
}
@@ -8125,7 +7954,7 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
*/
spin_lock_irqsave(&np->smp_lock, flags);
- sts = ncr_reset_bus(np, cmd, 1);
+ sts = ncr_reset_bus(np);
done_list = np->done_list;
np->done_list = NULL;
@@ -8136,30 +7965,6 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
return sts;
}
-#if 0 /* unused and broken */
-static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
-{
- struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
- int sts;
- unsigned long flags;
- struct scsi_cmnd *done_list;
-
- printk("ncr53c8xx_abort\n");
-
- NCR_LOCK_NCB(np, flags);
-
- sts = ncr_abort_command(np, cmd);
-out:
- done_list = np->done_list;
- np->done_list = NULL;
- NCR_UNLOCK_NCB(np, flags);
-
- ncr_flush_done_cmds(done_list);
-
- return sts;
-}
-#endif
-
/*
** Scsi command waiting list management.
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
index e42acf314d06..33df6a9ba9b5 100644
--- a/drivers/scsi/pcmcia/fdomain_cs.c
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
goto fail_disable;
if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
- "fdomain_cs"))
+ "fdomain_cs")) {
+ ret = -EBUSY;
goto fail_disable;
+ }
sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
if (!sh) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 17c0f26e683a..63690508313b 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1323,7 +1323,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
void *pMessage;
unsigned long flags;
int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
- int rv = -1;
+ int rv;
WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6b5b6a75ac88..3404782988d5 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1162,13 +1162,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->request) {
- QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
- "sc_cmd=%p.\n", sc_cmd);
- return;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
"is not valid, sc_cmd=%p.\n", sc_cmd);
return;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 85f41abcb56c..42d0d941dba5 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3004,7 +3004,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
u32 *list;
int i;
- int status = 0, rc;
+ int status;
u32 *pbl;
dma_addr_t page;
int num_pages;
@@ -3016,7 +3016,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
*/
if (!qedf->num_queues) {
QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
- return 1;
+ return -ENOMEM;
}
/*
@@ -3024,7 +3024,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = 1;
+ status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
goto mem_alloc_failure;
}
@@ -3040,8 +3040,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
"qedf->global_queues=%p.\n", qedf->global_queues);
/* Allocate DMA coherent buffers for BDQ */
- rc = qedf_alloc_bdq(qedf);
- if (rc) {
+ status = qedf_alloc_bdq(qedf);
+ if (status) {
QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
goto mem_alloc_failure;
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 71333d3c5c86..d01cd829ef97 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -609,14 +609,7 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
goto error;
}
- if (!sc_cmd->request) {
- QEDI_WARN(&qedi->dbg_ctx,
- "sc_cmd->request is NULL, sc_cmd=%p.\n",
- sc_cmd);
- goto error;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDI_WARN(&qedi->dbg_ctx,
"request->q is NULL so request is not valid, sc_cmd=%p.\n",
sc_cmd);
@@ -936,17 +929,11 @@ exit_fp_process:
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
- struct iscsi_db_data dbell = { 0 };
-
- dbell.agg_flags = 0;
+ qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
- dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
- dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
- dbell.params |=
- DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
-
- dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
- writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+ /* wmb - Make sure fw idx is coherent */
+ wmb();
+ writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);
/* Make sure fw write idx is coherent, and include both memory barriers
* as a failsafe as for some architectures the call is the same but on
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 97f83760da88..c5260429c637 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -499,8 +499,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
{
- struct qedi_ctx *qedi = qedi_ep->qedi;
struct qed_iscsi_params_offload *conn_info;
+ struct qedi_ctx *qedi = qedi_ep->qedi;
int rval;
int i;
@@ -577,10 +577,37 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
"Default cq index [%d], mss [%d]\n",
conn_info->default_cq, conn_info->mss);
+ /* Prepare the doorbell parameters */
+ qedi_ep->db_data.agg_flags = 0;
+ qedi_ep->db_data.params = 0;
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_MAX);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ISCSI_SQ_PROD_CMD);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1);
+
+ /* Register doorbell with doorbell recovery mechanism */
+ rval = qedi_ops->common->db_recovery_add(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rval) {
+ kfree(conn_info);
+ return rval;
+ }
+
rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
- if (rval)
+ if (rval) {
+ /* delete doorbell from doorbell recovery mechanism */
+ rval = qedi_ops->common->db_recovery_del(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data);
+
QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
rval, qedi_ep);
+ }
kfree(conn_info);
return rval;
@@ -1109,6 +1136,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
test_bit(QEDI_IN_RECOVERY, &qedi->flags))
goto ep_release_conn;
+ /* Delete doorbell from doorbell recovery mechanism */
+ ret = qedi_ops->common->db_recovery_del(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data);
+
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
QEDI_WARN(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 758735209e15..a31c5de74754 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -80,6 +80,7 @@ struct qedi_endpoint {
u32 handle;
u32 fw_cid;
void __iomem *p_doorbell;
+ struct iscsi_db_data db_data;
/* Send queue management */
struct iscsi_wqe *sq;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 0b0acb827071..e6dc0b495a82 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1621,7 +1621,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
u32 *list;
int i;
- int status = 0, rc;
+ int status;
u32 *pbl;
dma_addr_t page;
int num_pages;
@@ -1632,14 +1632,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
*/
if (!qedi->num_queues) {
QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
- return 1;
+ return -ENOMEM;
}
/* Make sure we allocated the PBL that will contain the physical
* addresses of our queues
*/
if (!qedi->p_cpuq) {
- status = 1;
+ status = -EINVAL;
goto mem_alloc_failure;
}
@@ -1654,13 +1654,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
"qedi->global_queues=%p.\n", qedi->global_queues);
/* Allocate DMA coherent buffers for BDQ */
- rc = qedi_alloc_bdq(qedi);
- if (rc)
+ status = qedi_alloc_bdq(qedi);
+ if (status)
goto mem_alloc_failure;
/* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
- if (rc)
+ status = qedi_alloc_nvm_iscsi_cfg(qedi);
+ if (status)
goto mem_alloc_failure;
/* Allocate a CQ and an associated PBL for each MSI-X
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 928da90b79be..d0b4e063bfe1 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -490,7 +490,6 @@ __setup("qla1280=", qla1280_setup);
#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd) Cmnd->result
#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
-#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
#define CMD_HOST(Cmnd) Cmnd->device->host
#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
@@ -2827,7 +2826,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3082,7 +3081,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3981,7 +3980,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
} */
printk(" tag=%d, transfersize=0x%x \n",
- cmd->tag, cmd->transfersize);
+ scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
printk(" SP=0x%p\n", CMD_SP(cmd));
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56b..cbc1303e761e 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+ qla_edif.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3aa9869f6fae..d09776b77af2 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1887,6 +1887,30 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
}
+static ssize_t
+qla2x00_mpi_pause_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval = 0;
+
+ if (sscanf(buf, "%d", &rval) != 1)
+ return -EINVAL;
+
+ ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
+
+ rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
+ count = 0;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
+
/* ----- */
static ssize_t
@@ -2435,6 +2459,7 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
+static DEVICE_ATTR_RO(edif_doorbell);
struct device_attribute *qla2x00_host_attrs[] = {
@@ -2480,6 +2505,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no,
&dev_attr_fw_attr,
&dev_attr_dport_diagnostics,
+ &dev_attr_edif_doorbell,
+ &dev_attr_mpi_pause,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
@@ -2706,12 +2733,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw))
- fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
- else
+ if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ fcport->logout_on_delete = 1;
+
+ qlt_schedule_sess_for_deletion(fcport);
+ } else {
qla2x00_port_logout(fcport->vha, fcport);
+ }
}
}
@@ -3107,6 +3136,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
qla2x00_wait_for_sess_deletion(vha);
qla_nvme_delete(vha);
+ qla_enode_stop(vha);
+ qla_edb_stop(vha);
+
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index d42b2ad84049..4b5d28d89d69 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -25,6 +25,10 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ ql_dbg(ql_dbg_user, sp->vha, 0x7009,
+ "%s: sp hdl %x, result=%x bsg ptr %p\n",
+ __func__, sp->handle, res, bsg_job);
+
sp->free(sp);
bsg_reply->result = res;
@@ -53,11 +57,19 @@ void qla2x00_bsg_sp_free(srb_t *sp)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (sp->remap.remapped) {
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+ } else {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
}
if (sp->type == SRB_CT_CMD ||
@@ -266,6 +278,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
+ uint32_t els_cmd = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
@@ -279,6 +292,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
+ els_cmd = bsg_request->rqst_data.h_els.command_code;
+ if (els_cmd == ELS_AUTH_ELS)
+ return qla_edif_process_els(vha, bsg_job);
}
if (!vha->flags.online) {
@@ -2768,10 +2784,13 @@ qla2x00_manage_host_port(struct bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
struct fc_bsg_request *bsg_request = bsg_job->request;
+ ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
+ __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
+
switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2840,6 +2859,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_EDIF_MGMT:
+ return qla_edif_app_mgmt(bsg_job);
+
case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
return qla2x00_get_flash_image_status(bsg_job);
@@ -2897,12 +2919,19 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
return -EBUSY;
}
+ if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
skip_chip_chk:
- ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "Entered %s msgcode=0x%x. bsg ptr %px\n",
+ __func__, bsg_request->msgcode, bsg_job);
switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
@@ -2913,7 +2942,7 @@ skip_chip_chk:
ret = qla2x00_process_ct(bsg_job);
break;
case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
+ ret = qla2x00_process_vendor_specific(vha, bsg_job);
break;
case FC_BSG_HST_ADD_RPORT:
case FC_BSG_HST_DEL_RPORT:
@@ -2922,6 +2951,10 @@ skip_chip_chk:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
+
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "%s done with return %x\n", __func__, ret);
+
return ret;
}
@@ -2936,6 +2969,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
unsigned long flags;
struct req_que *req;
+ ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+ __func__, bsg_job);
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2945,27 +2980,26 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
- if (sp) {
- if (((sp->type == SRB_CT_CMD) ||
- (sp->type == SRB_ELS_CMD_HST) ||
- (sp->type == SRB_FXIOCB_BCMD))
- && (sp->u.bsg_job == bsg_job)) {
- req->outstanding_cmds[cnt] = NULL;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_log(ql_log_warn, vha, 0x7089,
- "mbx abort_command "
- "failed.\n");
- bsg_reply->result = -EIO;
- } else {
- ql_dbg(ql_dbg_user, vha, 0x708a,
- "mbx abort_command "
- "success.\n");
- bsg_reply->result = 0;
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- goto done;
+ if (sp &&
+ (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_ELS_CMD_HST ||
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+ sp->type == SRB_FXIOCB_BCMD) &&
+ sp->u.bsg_job == bsg_job) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command failed.\n");
+ bsg_reply->result = -EIO;
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command success.\n");
+ bsg_reply->result = 0;
}
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ goto done;
+
}
}
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0274e99e4a12..dd793cf8bc1e 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -31,6 +31,7 @@
#define QL_VND_DPORT_DIAGNOSTICS 0x19
#define QL_VND_GET_PRIV_STATS_EX 0x1A
#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
+#define QL_VND_EDIF_MGMT 0x1F
#define QL_VND_MANAGE_HOST_STATS 0x23
#define QL_VND_GET_HOST_STATS 0x24
#define QL_VND_GET_TGT_STATS 0x25
@@ -294,4 +295,6 @@ struct qla_active_regions {
uint8_t reserved[32];
} __packed;
+#include "qla_edif_bsg.h"
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f2d05592c1e2..25549a8a2d72 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,8 +12,7 @@
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0199 | |
* | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2116 |
- * | | | 0x211a |
+ * | Device Discovery | 0x2134 | 0x210e-0x2115 |
* | | | 0x211c-0x2128 |
* | | | 0x212c-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 9eb708e5e22e..f1f6c740bdcd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -367,6 +367,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
+#define ql_dbg_edif 0x00000400 /* edif and purex debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2f67ec1df3e6..be2eb75ee1a3 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -49,6 +49,28 @@ typedef struct {
uint8_t domain;
} le_id_t;
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
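The union gives two views of the same 24-bit FC_ID: b24 as a single integer, and b.domain/b.area/b.al_pa as the individual address bytes, with the struct's field order flipped per endianness so both views always agree. A little-endian userspace model of the packing (the big-endian branch is omitted here for brevity):

#include <stdint.h>
#include <stdio.h>

/*
 * Little-endian model of the union above; on a big-endian host the
 * struct's byte order flips so that b24 reads the same either way.
 */
typedef union {
	uint32_t b24 : 24;
	struct {
		uint8_t al_pa;	/* lowest byte of the 24-bit FC_ID  */
		uint8_t area;
		uint8_t domain;	/* highest byte of the 24-bit FC_ID */
		uint8_t rsvd_1;
	} b;
} port_id_t;

int main(void)
{
	port_id_t id = { .b = { .al_pa = 0x01, .area = 0x02, .domain = 0x03 } };

	/* domain:area:al_pa packs to 0x030201 as a single 24-bit value */
	printf("b24 = 0x%06x\n", (unsigned int)id.b24);
	return 0;
}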
#include "qla_bsg.h"
#include "qla_dsd.h"
#include "qla_nx.h"
@@ -319,6 +341,13 @@ struct name_list_extended {
u32 size;
u8 sent;
};
+
+struct els_reject {
+ struct fc_els_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
/*
* Timeout timer counts in seconds
*/
@@ -345,6 +374,8 @@ struct name_list_extended {
#define FW_MAX_EXCHANGES_CNT (32 * 1024)
#define REDUCE_EXCHANGES_CNT (8 * 1024)
+#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16)
+
struct req_que;
struct qla_tgt_sess;
@@ -370,32 +401,10 @@ struct srb_cmd {
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
#define SRB_WAKEUP_ON_COMP BIT_6
#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */
+#define SRB_EDIF_CLEANUP_DELETE BIT_9
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
-
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
#define ISP_REG16_DISCONNECT 0xFFFF
static inline le_id_t be_id_to_le(be_id_t id)
@@ -483,6 +492,7 @@ struct srb_iocb {
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
#define SRB_LOGIN_PRLI_ONLY BIT_4
+#define SRB_LOGIN_FCSP BIT_5
uint16_t data[2];
u32 iop[2];
} logio;
@@ -587,6 +597,10 @@ struct srb_iocb {
u16 cmd;
u16 vp_index;
} ctrlvp;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame sa_frame;
+ } sa_update;
} u;
struct timer_list timer;
@@ -617,6 +631,21 @@ struct srb_iocb {
#define SRB_PRLI_CMD 21
#define SRB_CTRL_VP 22
#define SRB_PRLO_CMD 23
+#define SRB_SA_UPDATE 25
+#define SRB_ELS_CMD_HST_NOLOGIN 26
+#define SRB_SA_REPLACE 27
+
+struct qla_els_pt_arg {
+ u8 els_opcode;
+ u8 vp_idx;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le32 rx_xchg_address;
+ port_id_t did;
+ u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
enum {
TYPE_SRB,
@@ -630,6 +659,13 @@ struct iocb_resource {
u16 iocb_cnt;
};
+struct bsg_cmd {
+ struct bsg_job *bsg_job;
+ union {
+ struct qla_els_pt_arg els_arg;
+ } u;
+};
+
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
@@ -662,7 +698,21 @@ typedef struct srb {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
+ struct bsg_cmd bsg_cmd;
} u;
+ struct {
+ bool remapped;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } req;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } rsp;
+ } remap;
/*
* Report completion status @res and call sp_put(@sp). @res is
* an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
@@ -2294,6 +2344,7 @@ struct imm_ntfy_from_isp {
__le16 nport_handle;
uint16_t reserved_2;
__le16 flags;
+#define NOTIFY24XX_FLAGS_FCSP BIT_5
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
__le16 srr_rx_id;
@@ -2377,11 +2428,9 @@ struct mbx_24xx_entry {
*/
typedef enum {
FCT_UNKNOWN,
- FCT_RSCN,
- FCT_SWITCH,
- FCT_BROADCAST,
- FCT_INITIATOR,
- FCT_TARGET,
+ FCT_BROADCAST = 0x01,
+ FCT_INITIATOR = 0x02,
+ FCT_TARGET = 0x04,
FCT_NVME_INITIATOR = 0x10,
FCT_NVME_TARGET = 0x20,
FCT_NVME_DISCOVERY = 0x40,
@@ -2424,6 +2473,7 @@ enum discovery_state {
DSC_LOGIN_COMPLETE,
DSC_ADISC,
DSC_DELETE_PEND,
+ DSC_LOGIN_AUTH_PEND,
};
enum login_state { /* FW control Target side */
@@ -2467,6 +2517,8 @@ typedef struct fc_port {
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
unsigned int prli_pend_timer:1;
+ unsigned int do_prli_nvme:1;
+
uint8_t nvme_flag;
uint8_t node_name[WWN_SIZE];
@@ -2563,6 +2615,33 @@ typedef struct fc_port {
u64 tgt_short_link_down_cnt;
u64 tgt_link_down_time;
u64 dev_loss_tmo;
+ /*
+ * EDIF parameters for encryption.
+ */
+ struct {
+ uint32_t enable:1; /* device is edif enabled/req'd */
+ uint32_t app_stop:2;
+ uint32_t app_started:1;
+ uint32_t aes_gmac:1;
+ uint32_t app_sess_online:1;
+ uint32_t tx_sa_set:1;
+ uint32_t rx_sa_set:1;
+ uint32_t tx_sa_pending:1;
+ uint32_t rx_sa_pending:1;
+ uint32_t tx_rekey_cnt;
+ uint32_t rx_rekey_cnt;
+ uint64_t tx_bytes;
+ uint64_t rx_bytes;
+ uint8_t auth_state;
+ uint16_t authok:1;
+ uint16_t rekey_cnt;
+ struct list_head edif_indx_list;
+ spinlock_t indx_list_lock;
+
+ struct list_head tx_sa_list;
+ struct list_head rx_sa_list;
+ spinlock_t sa_list_lock;
+ } edif;
} fc_port_t;
enum {
@@ -2604,7 +2683,8 @@ static const char * const port_dstate_str[] = {
"UPD_FCPORT",
"LOGIN_COMPLETE",
"ADISC",
- "DELETE_PEND"
+ "DELETE_PEND",
+ "LOGIN_AUTH_PEND",
};
/*
@@ -2616,6 +2696,8 @@ static const char * const port_dstate_str[] = {
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
#define FCF_ASYNC_ACTIVE BIT_5
+#define FCF_FCSP_DEVICE BIT_6
+#define FCF_EDIF_DELETE BIT_7
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2707,7 +2789,7 @@ static const char * const port_dstate_str[] = {
/*
* FDMI HBA attribute types.
*/
-#define FDMI1_HBA_ATTR_COUNT 9
+#define FDMI1_HBA_ATTR_COUNT 10
#define FDMI2_HBA_ATTR_COUNT 17
#define FDMI_HBA_NODE_NAME 0x1
@@ -3386,6 +3468,7 @@ enum qla_work_type {
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
QLA_EVT_ELS_PLOGI,
+ QLA_EVT_SA_REPLACE,
};
@@ -3444,6 +3527,11 @@ struct qla_work_evt {
u8 fc4_type;
srb_t *sp;
} gpnft;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ fc_port_t *fcport;
+ uint16_t nport_handle;
+ } sa_update;
} u;
};
@@ -3845,7 +3933,6 @@ struct qlt_hw_data {
int num_act_qpairs;
#define DEFAULT_NAQP 2
spinlock_t atio_lock ____cacheline_aligned;
- struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3935,7 +4022,9 @@ struct qla_hw_data {
uint32_t scm_supported_f:1;
/* Enabled in Driver */
uint32_t scm_enabled:1;
- uint32_t max_req_queue_warned:1;
+ uint32_t edif_hw:1;
+ uint32_t edif_enabled:1;
+ uint32_t n2n_fw_acc_sec:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
} flags;
@@ -4347,6 +4436,7 @@ struct qla_hw_data {
/* Cisco fabric attached */
#define FW_ATTR_EXT0_SCM_CISCO 0x00002000
#define FW_ATTR_EXT0_NVME2 BIT_13
+#define FW_ATTR_EXT0_EDIF BIT_5
uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
@@ -4619,8 +4709,24 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
u64 prev_cmd_cnt;
+ struct dma_pool *purex_dma_pool;
+ struct btree_head32 host_map;
+
+#define EDIF_NUM_SA_INDEX 512
+#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX
+ void *edif_rx_sa_id_map;
+ void *edif_tx_sa_id_map;
+ spinlock_t sadb_fp_lock;
+
+ struct list_head sadb_tx_index_list;
+ struct list_head sadb_rx_index_list;
+ spinlock_t sadb_lock; /* protects list */
+ struct els_reject elsrej;
+ u8 edif_post_stop_cnt_down;
};
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
struct active_regions {
uint8_t global;
struct {
@@ -4659,6 +4765,8 @@ struct purex_item {
} iocb;
};
+#include "qla_edif.h"
+
#define SCM_FLAG_RDF_REJECT 0x00
#define SCM_FLAG_RDF_COMPLETED 0x01
@@ -4888,6 +4996,8 @@ typedef struct scsi_qla_host {
u64 reset_cmd_err_cnt;
u64 link_down_time;
u64 short_link_down_cnt;
+ struct edif_dbell e_dbell;
+ struct pur_core pur_cinfo;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5058,6 +5168,9 @@ struct secure_flash_update_block_pk {
#define QLA_BUSY 0x107
#define QLA_ALREADY_REGISTERED 0x109
#define QLA_OS_TIMER_EXPIRED 0x10a
+#define QLA_ERR_NO_QPAIR 0x10b
+#define QLA_ERR_NOT_FOUND 0x10c
+#define QLA_ERR_FROM_FW 0x10d
#define NVRAM_DELAY() udelay(10)
@@ -5088,6 +5201,43 @@ enum nexus_wait_type {
WAIT_LUN,
};
+#define INVALID_EDIF_SA_INDEX 0xffff
+#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe
+
+#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE
+
+/* edif hash element */
+struct edif_list_entry {
+ uint16_t handle; /* nport_handle */
+ uint32_t update_sa_index;
+ uint32_t delete_sa_index;
+ uint32_t count; /* counter for filtering sa_index */
+#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */
+ uint32_t flags; /* used by sadb cleanup code */
+ fc_port_t *fcport; /* needed by rx delay timer function */
+ struct timer_list timer; /* rx delay timer */
+ struct list_head next;
+};
+
+#define EDIF_TX_INDX_BASE 512
+#define EDIF_RX_INDX_BASE 0
+#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until this many */
+
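With EDIF_NUM_SA_INDEX set to 512 and the tx base offset by that amount, the rx and tx SA index spaces occupy disjoint ranges (0-511 and 512-1023 in this reading), so a raw index can be classified by range alone. A tiny check of that assumption; the classification helper is invented for illustration.

#include <stdio.h>

#define EDIF_NUM_SA_INDEX 512
#define EDIF_RX_INDX_BASE 0
#define EDIF_TX_INDX_BASE 512

/* Illustrative only: classify a raw sa_index by which pool owns it. */
int main(void)
{
	for (int sa_index = 0; sa_index < 2 * EDIF_NUM_SA_INDEX;
	     sa_index += 511)
		printf("sa_index %4d -> %s pool\n", sa_index,
		       sa_index >= EDIF_TX_INDX_BASE ? "tx" : "rx");
	return 0;
}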
+/* entry in the sa_index free pool */
+
+struct sa_index_pair {
+ uint16_t sa_index;
+ uint32_t spi;
+};
+
+/* edif sa_index data structure */
+struct edif_sa_index_entry {
+ struct sa_index_pair sa_pair[2];
+ fc_port_t *fcport;
+ uint16_t handle;
+ struct list_head next;
+};
+
/* Refer to SNIA SFF 8247 */
struct sff_8247_a0 {
u8 txid; /* transceiver id */
@@ -5203,9 +5353,12 @@ struct sff_8247_a0 {
#define NVME_FCP_TARGET(fcport) \
(FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+#define NVME_PRIORITY(ha, fcport) \
+ (NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME))
+
#define NVME_TARGET(ha, fcport) \
- ((NVME_FCP_TARGET(fcport) && \
- (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ (fcport->do_prli_nvme || \
NVME_ONLY_TARGET(fcport)) \
#define PRLI_PHASE(_cls) \
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
new file mode 100644
index 000000000000..ad746c62f0d4
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -0,0 +1,3461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#include "qla_def.h"
+#include "qla_edif.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <scsi/scsi_tcq.h>
+
+static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list);
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame);
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index);
+static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
+
+struct edb_node {
+ struct list_head list;
+ uint32_t ntype;
+ union {
+ port_id_t plogi_did;
+ uint32_t async;
+ port_id_t els_sid;
+ struct edif_sa_update_aen sa_aen;
+ } u;
+};
+
+static struct els_sub_cmd {
+ uint16_t cmd;
+ const char *str;
+} sc_str[] = {
+ {SEND_ELS, "send ELS"},
+ {SEND_ELS_REPLY, "send ELS Reply"},
+ {PULL_ELS, "retrieve ELS"},
+};
+
+const char *sc_to_str(uint16_t cmd)
+{
+ int i;
+ struct els_sub_cmd *e;
+
+ for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
+ e = sc_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
+static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
+ uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ struct edif_list_entry *tentry;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* timeout called when no traffic and delayed rx sa_index delete */
+static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
+{
+ struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
+ fc_port_t *fcport = edif_entry->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t nport_handle;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3069,
+ "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
+ __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
+
+ /*
+ * if delete_sa_index is valid then no one has serviced this
+ * delayed delete
+ */
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+
+ /*
+ * delete_sa_index is invalidated when we find the new sa_index in
+ * the incoming data stream. If it is not invalidated then we are
+ * still looking for the new sa_index because there is no I/O and we
+ * need to just force the rx delete and move on. Otherwise
+ * we could get another rekey which will result in an error 66.
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ uint16_t delete_sa_index = edif_entry->delete_sa_index;
+
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ delete_sa_index, 0);
+
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
+ __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
+ nport_handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl not found for delete_sa_index: %d\n",
+ __func__, edif_entry->delete_sa_index);
+ }
+ } else {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ }
+}
+
+/*
+ * create a new list entry for this nport handle and
+ * add an sa_update index to the list - called for sa_update
+ */
+static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
+ uint16_t sa_index, uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ unsigned long flags = 0;
+
+ /* if the entry exists, then just update the sa_index */
+ entry = qla_edif_list_find_sa_index(fcport, handle);
+ if (entry) {
+ entry->update_sa_index = sa_index;
+ entry->count = 0;
+ return 0;
+ }
+
+ /*
+ * This is the normal path - there should be no existing entry
+ * when update is called. The exception is at startup
+ * when update is called for the first two sa_indexes
+ * followed by a delete of the first sa_index
+ */
+ entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = handle;
+ entry->update_sa_index = sa_index;
+ entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ entry->count = 0;
+ entry->flags = 0;
+ timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return 0;
+}
+
+/* remove an entry from the list */
+static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.sa_update.fcport = fcport;
+ e->u.sa_update.sa_ctl = sa_ctl;
+ e->u.sa_update.nport_handle = nport_handle;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
+ fcport->node_name, fcport->port_name, fcport->d_id.b24);
+
+ fcport->edif.tx_rekey_cnt = 0;
+ fcport->edif.rx_rekey_cnt = 0;
+
+ fcport->edif.tx_bytes = 0;
+ fcport->edif.rx_bytes = 0;
+}
+
+static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ fc_port_t *fcport)
+{
+ struct extra_auth_els *p;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct qla_bsg_auth_els_request *req =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ if (!vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_edif, vha, 0x9105,
+ "%s edif not enabled\n", __func__);
+ goto done;
+ }
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ goto done;
+ }
+
+ p = &req->e;
+
+ /* Get response */
+ if (p->sub_cmd == PULL_ELS) {
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ qla_pur_get_pending(vha, fcport, bsg_job);
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
+ __func__, sc_to_str(p->sub_cmd), fcport->port_name,
+ fcport->d_id.b24, rpl->rx_xchg_address,
+ rpl->r.reply_payload_rcv_len, bsg_job);
+
+ goto done;
+ }
+ return 0;
+
+done:
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -EIO;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
+{
+ fc_port_t *f, *tf;
+
+ f = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if ((f->flags & FCF_FCSP_DEVICE)) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
+ "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
+ f->node_name, f->port_name,
+ f->d_id.b24, id->b24);
+ if (f->d_id.b24 == id->b24)
+ return f;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * qla_edif_app_check - check for valid application id.
+ * @vha: host adapter pointer
+ * @appid: application id
+ * Return: false = fail, true = pass
+ */
+static bool
+qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
+{
+ /* check that the app is allowed/known to the driver */
+
+ if (appid.app_vid == EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
+ return true;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+
+ return false;
+}
+
+static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
+ int waitonly)
+{
+ int cnt, max_cnt = 200;
+ bool traced = false;
+
+ fcport->keep_nport_handle = 1;
+
+ if (!waitonly) {
+ qla2x00_set_fcport_disc_state(fcport, state);
+ qlt_schedule_sess_for_deletion(fcport);
+ } else {
+ qla2x00_set_fcport_disc_state(fcport, state);
+ }
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waiting for session, max_cnt=%u\n",
+ __func__, max_cnt);
+
+ cnt = 0;
+
+ if (waitonly) {
+ /* Minimum wait for the marker (10 msecs; we sleep 50). */
+ msleep(50);
+ cnt += 50;
+ }
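+ /*
+ * Poll every 20 msecs, up to max_cnt iterations (roughly 4 secs),
+ * for the session to reach the expected discovery state.
+ */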
+ while (1) {
+ if (!traced) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: session sleep.\n",
+ __func__);
+ traced = true;
+ }
+ msleep(20);
+ cnt++;
+ if (waitonly && (fcport->disc_state == state ||
+ fcport->disc_state == DSC_LOGIN_COMPLETE))
+ break;
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+ break;
+ if (cnt > max_cnt)
+ break;
+ }
+
+ if (!waitonly) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+ __func__, fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+ } else {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+ "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+ __func__, fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+ }
+}
+
+static void
+qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
+ int index)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ list_del(&sa_ctl->next);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+ if (index >= 512)
+ fcport->edif.tx_rekey_cnt--;
+ else
+ fcport->edif.rx_rekey_cnt--;
+ kfree(sa_ctl);
+}
+
+/* return an index to the freepool */
+static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
+ uint16_t sa_index)
+{
+ void *sa_id_map;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ u16 lsa_index = sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
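+ /*
+ * tx sa_indexes sit at or above EDIF_TX_SA_INDEX_BASE, so the tx
+ * bitmap stores the offset (e.g. with a base of 512, sa_index 513
+ * maps to bit 1); rx sa_indexes are used as bit numbers directly.
+ */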
+ if (dir) {
+ sa_id_map = ha->edif_tx_sa_id_map;
+ lsa_index -= EDIF_TX_SA_INDEX_BASE;
+ } else {
+ sa_id_map = ha->edif_rx_sa_id_map;
+ }
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ clear_bit(lsa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index %d added to free pool\n", __func__, sa_index);
+}
+
+static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
+ struct fc_port *fcport, struct edif_sa_index_entry *entry,
+ int pdir)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ int i, dir;
+ int key_cnt = 0;
+
+ for (i = 0; i < 2; i++) {
+ if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
+ continue;
+
+ if (fcport->loop_id != entry->handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
+ __func__, i, entry->handle, fcport->loop_id,
+ entry->sa_pair[i].sa_index);
+ }
+
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ entry->sa_pair[i].sa_index, pdir);
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
+ }
+
+ /* Release the index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, entry->sa_pair[i].sa_index, entry->handle);
+
+ dir = (entry->sa_pair[i].sa_index <
+ EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ qla_edif_add_sa_index_to_freepool(fcport, dir,
+ entry->sa_pair[i].sa_index);
+
+ /* Delete timer on RX */
+ if (pdir != SAU_FLG_TX) {
+ edif_entry =
+ qla_edif_list_find_sa_index(fcport, entry->handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ /*
+ * valid delete_sa_index indicates there is a rx
+ * delayed delete queued
+ */
+ if (edif_entry->delete_sa_index !=
+ INVALID_EDIF_SA_INDEX) {
+ del_timer(&edif_entry->timer);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+ key_cnt++;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %d %s keys released\n",
+ __func__, key_cnt, pdir ? "tx" : "rx");
+}
+
+/* find and release all outstanding sadb sa_indices */
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ struct edif_sa_index_entry *entry, *tmp;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: Starting...\n", __func__);
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ __qla2x00_release_all_sadb(vha, fcport, entry, 0);
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
+
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+}
+
+/**
+ * qla_edif_app_start - application has announced its presence
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ *
+ * Set/activate doorbell. Reset current sessions and re-login with
+ * secure flag.
+ */
+static int
+qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_start appstart;
+ struct app_start_reply appreply;
+ struct fc_port *fcport, *tf;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstart,
+ sizeof(struct app_start));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
+ __func__, appstart.app_info.app_vid, appstart.app_start_flags);
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* mark doorbell as active since an app is now present */
+ vha->e_dbell.db_flags = EDB_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
+ __func__);
+ }
+
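+ /*
+ * In N2N topology, force a relogin (link reset when n2n_fw_acc_sec
+ * is set, otherwise a full ISP abort) so the session comes back up
+ * now that the app is present.
+ */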
+ if (N2N_TOPO(vha->hw)) {
+ if (vha->hw->flags.n2n_fw_acc_sec)
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
+ __func__, fcport, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete);
+
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
+ fcport->keep_nport_handle,
+ fcport->send_els_logo, fcport->disc_state,
+ fcport->edif.auth_state, fcport->edif.app_stop);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ fcport->edif.app_started = 1;
+ if (fcport->edif.app_stop ||
+ (fcport->disc_state != DSC_LOGIN_COMPLETE &&
+ fcport->disc_state != DSC_LOGIN_PEND &&
+ fcport->disc_state != DSC_DELETED)) {
+ /* no activity */
+ fcport->edif.app_stop = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+ fcport->edif.app_sess_online = 1;
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+ }
+ qla_edif_sa_ctl_init(vha, fcport);
+ }
+ }
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* mark as active since an app is now present */
+ vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
+ __func__);
+ }
+
+ appreply.host_support_edif = vha->hw->flags.edif_enabled;
+ appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
+ appreply.edif_edb_active = vha->e_dbell.db_flags;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct app_start_reply);
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appreply,
+ sizeof(struct app_start_reply));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s app start completed with 0x%x\n",
+ __func__, rval);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_stop - app has announced it's exiting.
+ * @vha: host adapter pointer
+ * @bsg_job: user space command pointer
+ *
+ * Free any in-flight messages and clear all doorbell events
+ * to the application. Reject any message related to security.
+ */
+static int
+qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct app_stop appstop;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct fc_port *fcport, *tf;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstop,
+ sizeof(struct app_stop));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
+ __func__, appstop.app_info.app_vid);
+
+ /* Call db stop and enode stop functions */
+
+ /* if left running, short waits (< 16 secs) remain operational */
+ qla_enode_stop(vha); /* stop enode */
+ qla_edb_stop(vha); /* stop db */
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
+ __func__, fcport,
+ fcport->port_name, fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete, fcport->keep_nport_handle,
+ fcport->send_els_logo);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->edif.app_stop = 1;
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+
+ /* qla_edif_flush_sa_ctl_lists(fcport); */
+ fcport->edif.app_started = 0;
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ /* no return interface to app - it assumes we cleaned up ok */
+
+ return 0;
+}
+
+static int
+qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct app_plogi_reply *appplogireply)
+{
+ int ret = 0;
+
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes have not been set, TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ appplogireply->prli_status = 0;
+ ret = 1;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ appplogireply->prli_status = 1;
+ }
+ return ret;
+}
+
+/**
+ * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
+ * with prli
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogiok;
+ struct app_plogi_reply appplogireply = {0};
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogiok,
+ sizeof(struct auth_complete_cmd));
+
+ switch (appplogiok.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogiok.u.wwpn, 0);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s wwpn lookup failed: %8phC\n",
+ __func__, appplogiok.u.wwpn);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s undefined type: %x\n", __func__,
+ appplogiok.type);
+ break;
+ }
+
+ if (!fcport) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto errstate_exit;
+ }
+
+ /*
+ * if port is online then this is a REKEY operation
+ * Only do sa update checking
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s Skipping PRLI complete based on rekey\n", __func__);
+ appplogireply.prli_status = 1;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
+ goto errstate_exit;
+ }
+
+ /* make sure in AUTH_PENDING or else reject */
+ if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC is not in auth pending state (%x)\n",
+ __func__, fcport->port_name, fcport->disc_state);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 1;
+ fcport->edif.authok = 1;
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes have not been set, TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ }
+
+ if (qla_ini_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
+ __func__, fcport->port_name);
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+
+errstate_exit:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appplogireply,
+ sizeof(struct app_plogi_reply));
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed. Driver is given
+ * notice to tear down current session.
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogifail;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogifail,
+ sizeof(struct auth_complete_cmd));
+
+ /*
+ * TODO: edif: app has failed this plogi. Inform driver to
+ * take any action (if any).
+ */
+ switch (appplogifail.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogifail.u.wwpn, 0);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s undefined type: %x\n", __func__,
+ appplogifail.type);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ break;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s fcport is 0x%p\n", __func__, fcport);
+
+ if (fcport) {
+ /* set/reset edif values and flags */
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
+ __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
+
+ if (qla_ini_mode_enabled(fcport->vha)) {
+ fcport->send_els_logo = 1;
+ qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+ }
+ }
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
+ * [initiator|target] mode). It can be for a specific session matching a
+ * given nport id, or for all sessions.
+ * @vha: host adapter pointer
+ * @bsg_job: user request pointer
+ */
+static int
+qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ int32_t num_cnt;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_pinfo_req app_req;
+ struct app_pinfo_reply *app_reply;
+ port_id_t tdid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_pinfo_req));
+
+ num_cnt = app_req.num_ports; /* num of ports alloc'd by app */
+
+ app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
+ sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+ uint32_t pcnt = 0;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ tdid = app_req.remote_pid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "APP request entry - portid=%06x.\n", tdid.b24);
+
+ /* Ran out of space */
+ if (pcnt >= app_req.num_ports)
+ break;
+
+ if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
+ continue;
+
+ app_reply->ports[pcnt].rekey_count =
+ fcport->edif.rekey_cnt;
+
+ app_reply->ports[pcnt].remote_type =
+ VND_CMD_RTYPE_UNKNOWN;
+ if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_TARGET;
+ if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_INITIATOR;
+
+ app_reply->ports[pcnt].remote_pid = fcport->d_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
+ fcport->node_name, fcport->port_name, pcnt,
+ fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
+
+ switch (fcport->edif.auth_state) {
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
+ fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_NEEDED;
+ } else {
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_ELS_RCVD;
+ }
+ break;
+ default:
+ app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
+ break;
+ }
+
+ memcpy(app_reply->ports[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ app_reply->ports[pcnt].remote_state =
+ (atomic_read(&fcport->state) ==
+ FCS_ONLINE ? 1 : 0);
+
+ pcnt++;
+
+ if (tdid.b24 != 0)
+ break;
+ }
+ app_reply->port_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ /* skip the copy if the reply allocation failed */
+ if (app_reply)
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, app_reply,
+ sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
+
+ kfree(app_reply);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_getstats - app would like to read various statistics info
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int32_t
+qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t ret_size, size;
+
+ struct app_sinfo_req app_req;
+ struct app_stats_reply *app_reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_sinfo_req));
+ if (app_req.num_ports == 0) {
+ ql_dbg(ql_dbg_async, vha, 0x911d,
+ "%s app did not indicate number of ports to return\n",
+ __func__);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -1;
+ }
+
+ size = sizeof(struct app_stats_reply) +
+ (sizeof(struct app_sinfo) * app_req.num_ports);
+
+ if (size > bsg_job->reply_payload.payload_len)
+ ret_size = bsg_job->reply_payload.payload_len;
+ else
+ ret_size = size;
+
+ app_reply = kzalloc(size, GFP_KERNEL);
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+ uint32_t pcnt = 0;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.enable) {
+ if (pcnt >= app_req.num_ports)
+ break;
+
+ app_reply->elem[pcnt].rekey_count =
+ fcport->edif.rekey_cnt;
+ app_reply->elem[pcnt].tx_bytes =
+ fcport->edif.tx_bytes;
+ app_reply->elem[pcnt].rx_bytes =
+ fcport->edif.rx_bytes;
+
+ memcpy(app_reply->elem[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ pcnt++;
+ }
+ }
+ app_reply->elem_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ /* skip the copy if the reply allocation failed */
+ if (app_reply)
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
+
+ kfree(app_reply);
+
+ return rval;
+}
+
+int32_t
+qla_edif_app_mgmt(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct app_id appcheck;
+ bool done = true;
+ int32_t rval = 0;
+ uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ __func__, vnd_sc);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appcheck,
+ sizeof(struct app_id));
+
+ if (!vha->hw->flags.edif_enabled ||
+ test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
+ __func__, bsg_job, vha->dpc_flags);
+
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (!qla_edif_app_check(vha, appcheck)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s app check failed.\n",
+ __func__);
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ switch (vnd_sc) {
+ case QL_VND_SC_SA_UPDATE:
+ done = false;
+ rval = qla24xx_sadb_update(bsg_job);
+ break;
+ case QL_VND_SC_APP_START:
+ rval = qla_edif_app_start(vha, bsg_job);
+ break;
+ case QL_VND_SC_APP_STOP:
+ rval = qla_edif_app_stop(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_OK:
+ rval = qla_edif_app_authok(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_FAIL:
+ rval = qla_edif_app_authfail(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_FCINFO:
+ rval = qla_edif_app_getfcinfo(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_STATS:
+ rval = qla_edif_app_getstats(vha, bsg_job);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
+ __func__,
+ bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
+ rval = EXT_STATUS_INVALID_PARAM;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ break;
+ }
+
+done:
+ if (done) {
+ ql_dbg(ql_dbg_user, vha, 0x7009,
+ "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
+
+ return rval;
+}
+
+static struct edif_sa_ctl *
+qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
+ int dir)
+{
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame *sap;
+ int index = sa_frame->fast_sa_index;
+ unsigned long flags = 0;
+
+ sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
+ if (!sa_ctl) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "unable to allocate SA CTL\n");
+ return NULL;
+ }
+
+ /*
+ * need to allocate sa_index here and save it
+ * in both sa_ctl->index and sa_frame->fast_sa_index;
+ * If alloc fails then delete sa_ctl and return NULL
+ */
+ INIT_LIST_HEAD(&sa_ctl->next);
+ sap = &sa_ctl->sa_frame;
+ *sap = *sa_frame;
+ sa_ctl->index = index;
+ sa_ctl->fcport = fcport;
+ sa_ctl->flags = 0;
+ sa_ctl->state = 0L;
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ if (dir == SAU_FLG_TX)
+ list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
+ else
+ list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+
+ return sa_ctl;
+}
+
+void
+qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+}
+
+struct edif_sa_ctl *
+qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ struct list_head *sa_list;
+
+ if (dir == SAU_FLG_TX)
+ sa_list = &fcport->edif.tx_sa_list;
+ else
+ sa_list = &fcport->edif.rx_sa_list;
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
+ if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
+ sa_ctl->index == index)
+ return sa_ctl;
+ }
+ return NULL;
+}
+
+/* add the sa to the correct list */
+static int
+qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_ctl *sa_ctl = NULL;
+ int dir;
+ uint16_t sa_index;
+
+ dir = (sa_frame->flags & SAU_FLG_TX);
+
+ /* map the spi to an sa_index */
+ sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
+ if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
+ /* process rx delete */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
+ __func__, fcport->loop_id, sa_frame->spi);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(fcport->vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+
+ /* force a return of good bsg status; */
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ } else if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
+ __func__, sa_frame->spi, dir);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
+ __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
+
+ /* This is a local copy of sa_frame. */
+ sa_frame->fast_sa_index = sa_index;
+ /* create the sa_ctl */
+ sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
+ __func__, sa_frame->spi, dir, sa_index);
+ return -1;
+ }
+
+ set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
+
+ if (dir == SAU_FLG_TX)
+ fcport->edif.tx_rekey_cnt++;
+ else
+ fcport->edif.rx_rekey_cnt++;
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
+ fcport->edif.tx_rekey_cnt,
+ fcport->edif.rx_rekey_cnt, fcport->loop_id);
+
+ return 0;
+}
+
+#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
+#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+
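+/*
+ * Handle an SA_UPDATE bsg request from the authentication app. Three
+ * paths through this function: rx key delete (normally queued behind a
+ * delay timer), rx key update (tracked in the per-port edif list), and
+ * tx key update/delete (sent to the fw immediately).
+ */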
+int
+qla24xx_sadb_update(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = NULL;
+ srb_t *sp = NULL;
+ struct edif_list_entry *edif_entry = NULL;
+ int found = 0;
+ int rval = 0;
+ int result = 0;
+ struct qla_sa_update_frame sa_frame;
+ struct srb_iocb *iocb_cmd;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
+ "%s entered, vha: 0x%p\n", __func__, vha);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sa_frame,
+ sizeof(struct qla_sa_update_frame));
+
+ /* Check if host is online */
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
+ if (fcport) {
+ found = 1;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
+ fcport->edif.tx_bytes = 0;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
+ fcport->edif.rx_bytes = 0;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
+ sa_frame.port_id.b24);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+ goto done;
+ }
+
+ /* make sure the nport_handle is valid */
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
+ __func__, fcport->port_name, sa_frame.spi,
+ fcport->disc_state);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
+ goto done;
+ }
+
+ /* allocate and queue an sa_ctl */
+ result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
+
+ /* failure of bsg */
+ if (result == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping update.\n",
+ __func__, fcport->port_name);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+
+ /* rx delete failure */
+ } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping rx delete.\n",
+ __func__, fcport->port_name);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
+ __func__, fcport->port_name, sa_frame.fast_sa_index,
+ sa_frame.flags);
+
+ /* looking for rx index and delete */
+ if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ (sa_frame.flags & SAU_FLG_INV)) {
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+
+ /*
+ * make sure we have an existing rx key, otherwise just process
+ * this as a straight delete just like TX
+ * This is NOT a normal case, it indicates an error recovery or key cleanup
+ * by the ipsec code above us.
+ */
+ edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
+ if (!edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
+ __func__, fcport->loop_id, sa_index);
+ goto force_rx_delete;
+ }
+
+ /*
+ * if we have a forced delete for rx, remove the sa_index from the edif list
+ * and proceed with normal delete. The rx delay timer should not be running
+ */
+ if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
+ __func__, fcport->loop_id, sa_index);
+ kfree(edif_entry);
+ goto force_rx_delete;
+ }
+
+ /*
+ * delayed rx delete
+ *
+ * if delete_sa_index is not invalid then there is already
+ * a delayed index in progress, return bsg bad status
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ struct edif_sa_ctl *sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
+ __func__, edif_entry->handle, edif_entry->delete_sa_index);
+
+ /* free up the sa_ctl that was allocated with the sa_index */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
+ (sa_frame.flags & SAU_FLG_TX));
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ }
+
+ /* release the sa_index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
+
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ fcport->edif.rekey_cnt++;
+
+ /* configure and start the rx delay timer */
+ edif_entry->fcport = fcport;
+ edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
+ __func__, edif_entry, sa_index, nport_handle);
+
+ /*
+ * Start the timer when we queue the delayed rx delete.
+ * This is an activity timer that goes off if we have not
+ * received packets with the new sa_index
+ */
+ add_timer(&edif_entry->timer);
+
+ /*
+ * sa_delete for rx key with an active rx key including this one
+ * add the delete rx sa index to the hash so we can look for it
+ * in the rsp queue. Do this after making any changes to the
+ * edif_entry as part of the rx delete.
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
+ __func__, sa_index, nport_handle, bsg_job);
+
+ edif_entry->delete_sa_index = sa_index;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+
+ goto done;
+
+ /*
+ * rx index and update
+ * add the index to the list and continue with normal update
+ */
+ } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ ((sa_frame.flags & SAU_FLG_INV) == 0)) {
+ /* sa_update for rx key */
+ uint32_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+ int result;
+
+ /*
+ * add the update rx sa index to the hash so we can look for it
+ * in the rsp queue and continue normally
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
+ __func__, sa_index, nport_handle);
+
+ result = qla_edif_list_add_sa_update_index(fcport, sa_index,
+ nport_handle);
+ if (result) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
+ __func__, sa_index, nport_handle);
+ }
+ }
+ if (sa_frame.flags & SAU_FLG_GMAC_MODE)
+ fcport->edif.aes_gmac = 1;
+ else
+ fcport->edif.aes_gmac = 0;
+
+force_rx_delete:
+ /*
+ * sa_update for both rx and tx keys, sa_delete for tx key
+ * immediately process the request
+ */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ sp->type = SRB_SA_UPDATE;
+ sp->name = "bsg_sa_update";
+ sp->u.bsg_job = bsg_job;
+ /* sp->free = qla2x00_bsg_sp_free; */
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla2x00_bsg_job_done;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_frame = sa_frame;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x70e3,
+ "qla2x00_start_sp failed=%d.\n", rval);
+
+ qla2x00_rel_sp(sp);
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: %s sent, hdl=%x, portid=%06x.\n",
+ __func__, sp->name, sp->handle, fcport->d_id.b24);
+
+ fcport->edif.rekey_cnt++;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ return 0;
+
+/*
+ * send back error status
+ */
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
+ __func__, bsg_reply->result, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return 0;
+}
+
+static void
+qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
+{
+ node->ntype = N_UNDEF;
+ kfree(node);
+}
+
+/**
+ * qla_enode_init - initialize enode structs & lock
+ * @vha: host adapter pointer
+ *
+ * should only be called while the driver is attaching
+ */
+void
+qla_enode_init(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ char name[32];
+
+ if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
+ /* list still active - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
+ __func__);
+ return;
+ }
+
+ /* initialize lock which protects pur_core & init list */
+ spin_lock_init(&vha->pur_cinfo.pur_lock);
+ INIT_LIST_HEAD(&vha->pur_cinfo.head);
+
+ snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
+ ha->pdev->device);
+}
+
+/**
+ * qla_enode_stop - stop and clear enode data
+ * @vha: host adapter pointer
+ *
+ * called when app notified it is exiting
+ */
+void
+qla_enode_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct enode *node, *q;
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s enode not active\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
+
+ /* hopefully this is a null list at this point */
+ list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
+ node->dinfo.nodecnt);
+ list_del_init(&node->list);
+ qla_enode_free(vha, node);
+ }
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+}
+
+/*
+ * allocate enode struct and populate buffer
+ * returns: enode pointer with buffers
+ * NULL on error
+ */
+static struct enode *
+qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct enode *node;
+ struct purexevent *purex;
+
+ node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
+ if (!node)
+ return NULL;
+
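+ /*
+ * The enode and its ELS payload buffer come from the single
+ * RX_ELS_SIZE allocation; msgp points just past the node itself.
+ */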
+ purex = &node->u.purexinfo;
+ purex->msgp = (u8 *)(node + 1);
+ purex->msgp_len = ELS_MAX_PAYLOAD;
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+static void
+qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
+{
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
+ "%s add enode for type=%x, cnt=%x\n",
+ __func__, ptr->ntype, ptr->dinfo.nodecnt);
+
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+ list_add_tail(&ptr->list, &vha->pur_cinfo.head);
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ return;
+}
+
+static struct enode *
+qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
+{
+ struct enode *node_rtn = NULL;
+ struct enode *list_node = NULL;
+ unsigned long flags;
+ struct list_head *pos, *q;
+ uint32_t sid;
+ uint32_t rw_flag;
+ struct purexevent *purex;
+
+ /* secure the list from moving under us */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
+ list_node = list_entry(pos, struct enode, list);
+
+ /* node type determines what p1 and p2 are */
+ purex = &list_node->u.purexinfo;
+ sid = p1;
+ rw_flag = p2;
+
+ if (purex->pur_info.pur_sid.b24 == sid) {
+ if (purex->pur_info.pur_pend == 1 &&
+ rw_flag == PUR_GET) {
+ /*
+ * if the receive is in progress
+ * and its a read/get then can't
+ * transfer yet
+ */
+ ql_dbg(ql_dbg_edif, vha, 0x9106,
+ "%s purex xfer in progress for sid=%x\n",
+ __func__, sid);
+ } else {
+ /* found it and its complete */
+ node_rtn = list_node;
+ list_del(pos);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ return node_rtn;
+}
+
+/**
+ * qla_pur_get_pending - read/return authentication message sent
+ * from remote port
+ * @vha: host adapter pointer
+ * @fcport: session pointer
+ * @bsg_job: user request where the message is copy to.
+ */
+static int
+qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct bsg_job *bsg_job)
+{
+ struct enode *ptr;
+ struct purexevent *purex;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ bsg_job->reply_len = sizeof(*rpl);
+
+ ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x9111,
+ "%s no enode data found for %8phN sid=%06x\n",
+ __func__, fcport->port_name, fcport->d_id.b24);
+ SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
+ return -EIO;
+ }
+
+ /*
+ * enode is now off the linked list and is ours to deal with
+ */
+ purex = &ptr->u.purexinfo;
+
+ /* Copy info back to caller */
+ rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
+
+ SET_DID_STATUS(rpl->r.result, DID_OK);
+ rpl->r.reply_payload_rcv_len =
+ sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, purex->msgp,
+ purex->pur_info.pur_bytes_rcvd, 0);
+
+ /* data copy / passback completed - destroy enode */
+ qla_enode_free(vha, ptr);
+
+ return 0;
+}
+
+/* it is assumed the qpair lock is held */
+static int
+qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
+ struct qla_els_pt_arg *a)
+{
+ struct els_entry_24xx *els_iocb;
+
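+ /* NULL sp: the reject is fire-and-forget; no srb is tracked for it */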
+ els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!els_iocb) {
+ ql_log(ql_log_warn, vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla_els_pt_iocb(vha, els_iocb, a);
+
+ ql_dbg(ql_dbg_edif, vha, 0x0183,
+ "Sending ELS reject...\n");
+ ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
+ vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+void
+qla_edb_init(scsi_qla_host_t *vha)
+{
+ if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ /* list already init'd - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "edif db already initialized, cannot reinit\n");
+ return;
+ }
+
+ /* initialize lock which protects doorbell & init list */
+ spin_lock_init(&vha->e_dbell.db_lock);
+ INIT_LIST_HEAD(&vha->e_dbell.head);
+
+ /* create and initialize doorbell */
+ init_completion(&vha->e_dbell.dbell);
+}
+
+static void
+qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ /*
+ * releases the space held by this edb node entry
+ * this function does _not_ free the edb node itself
+ * NB: the edb node entry passed should not be on any list
+ *
+ * currently for the doorbell there's no additional cleanup
+ * needed, but this is here as a placeholder for future use.
+ */
+
+ if (!node) {
+ ql_dbg(ql_dbg_edif, vha, 0x09122,
+ "%s error - no valid node passed\n", __func__);
+ return;
+ }
+
+ node->ntype = N_UNDEF;
+}
+
+/* function called when app is stopping */
+
+void
+qla_edb_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *node, *q;
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
+ /* hopefully this is a null list at this point */
+ list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing edb_node type=%x\n",
+ __func__, node->ntype);
+ qla_edb_node_free(vha, node);
+ list_del(&node->list);
+
+ kfree(node);
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ /* wake up doorbell waiters - they'll be dismissed with error code */
+ complete_all(&vha->e_dbell.dbell);
+}
+
+static struct edb_node *
+qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct edb_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, vha, 0x9100,
+ "edb node unable to be allocated\n");
+ return NULL;
+ }
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+/* adds an already allocated edb node to the linked list */
+static bool
+qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
+{
+ unsigned long flags;
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return false;
+ }
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ list_add_tail(&ptr->list, &vha->e_dbell.head);
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ /* ring doorbell for waiters */
+ complete(&vha->e_dbell.dbell);
+
+ return true;
+}
+
+/* adds event to doorbell list */
+void
+qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
+ uint32_t data, uint32_t data2, fc_port_t *sfcport)
+{
+ struct edb_node *edbnode;
+ fc_port_t *fcport = sfcport;
+ port_id_t id;
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif not enabled */
+ return;
+ }
+
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled (type=%d)\n", __func__, dbtype);
+ return;
+ }
+
+ edbnode = qla_edb_node_alloc(vha, dbtype);
+ if (!edbnode) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to alloc db node\n", __func__);
+ return;
+ }
+
+ if (!fcport) {
+ id.b.domain = (data >> 16) & 0xff;
+ id.b.area = (data >> 8) & 0xff;
+ id.b.al_pa = data & 0xff;
+ ql_dbg(ql_dbg_edif, vha, 0x09222,
+ "%s: Arrived s_id: %06x\n", __func__,
+ id.b24);
+ fcport = qla2x00_find_fcport_by_pid(vha, &id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s can't find fcport for sid= 0x%x - ignoring\n",
+ __func__, id.b24);
+ kfree(edbnode);
+ return;
+ }
+ }
+
+ /* populate the edb node */
+ switch (dbtype) {
+ case VND_CMD_AUTH_STATE_NEEDED:
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ edbnode->u.plogi_did.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ edbnode->u.els_sid.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ edbnode->u.sa_aen.port_id = fcport->d_id;
+ edbnode->u.sa_aen.status = data;
+ edbnode->u.sa_aen.key_type = data2;
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unknown type: %x\n", __func__, dbtype);
+ qla_edb_node_free(vha, edbnode);
+ kfree(edbnode);
+ edbnode = NULL;
+ break;
+ }
+
+ if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ qla_edb_node_free(vha, edbnode);
+ kfree(edbnode);
+ return;
+ }
+ if (edbnode && fcport)
+ fcport->edif.auth_state = dbtype;
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+}
+
+static struct edb_node *
+qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+void
+qla_edif_timer(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
+ if (vha->e_dbell.db_flags != EDB_ACTIVE &&
+ ha->edif_post_stop_cnt_down) {
+ ha->edif_post_stop_cnt_down--;
+
+ /*
+ * Turn off the auto 'PLOGI ACC + secure=1' feature
+ * (additional FW option[3], BIT_15) via chip reset
+ * once the countdown expires.
+ */
+ if (ha->edif_post_stop_cnt_down == 0) {
+ ql_dbg(ql_dbg_async, vha, 0x911d,
+ "%s chip reset to turn off PLOGI ACC + secure\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ } else {
+ ha->edif_post_stop_cnt_down = 60;
+ }
+ }
+}
+
+/*
+ * app uses separate thread to read this. It'll wait until the doorbell
+ * is rung by the driver or the max wait time has expired
+ */
+ssize_t
+edif_doorbell_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
+ uint32_t dat_siz, buf_size, sz;
+
+ /* TODO: app currently hardcoded to 256. Will transition to bsg */
+ sz = 256;
+
+ /* stop new threads from waiting if we're not init'd */
+ if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+ "%s error - edif db not enabled\n", __func__);
+ return 0;
+ }
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif not enabled */
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+ "%s error - edif not enabled\n", __func__);
+ return -1;
+ }
+
+ buf_size = 0;
+ while ((sz - buf_size) >= sizeof(struct edb_node)) {
+ /* remove the next item from the doorbell list */
+ dat_siz = 0;
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ ap->event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap->port_id = dbnode->u.plogi_did;
+ dat_siz += sizeof(ap->port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap->port_id = dbnode->u.els_sid;
+ dat_siz += sizeof(ap->port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap->port_id = dbnode->u.sa_aen.port_id;
+ memcpy(ap->event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_siz += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ /* unknown node type, rtn unknown ntype */
+ ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
+ memcpy(ap->event_data, &dbnode->ntype, 4);
+ dat_siz += 4;
+ break;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ /* we're done with the db node, so free it up */
+ qla_edb_node_free(vha, dbnode);
+ kfree(dbnode);
+ } else {
+ break;
+ }
+
+ ap->event_data_size = dat_siz;
+ /* 8bytes = ap->event_code + ap->event_data_size */
+ buf_size += dat_siz + 8;
+ ap = (struct edif_app_dbell *)(buf + buf_size);
+ }
+ return buf_size;
+}
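+
+/*
+ * Illustrative consumer (an assumption, not part of the driver): the app
+ * side could drain the doorbell with a plain sysfs read, e.g.
+ *
+ *	int fd = open("/sys/class/scsi_host/host0/edif_doorbell", O_RDONLY);
+ *	char buf[256];
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * and then walk buf as a packed stream of event_code, event_data_size,
+ * event_data records until n bytes are consumed. The attribute path above
+ * is hypothetical; the real name comes from the DEVICE_ATTR wiring
+ * elsewhere in the driver.
+ */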
+
+static void qla_noop_sp_done(srb_t *sp, int res)
+{
+ sp->free(sp);
+}
+
+/*
+ * Called from work queue
+ * build and send the sa_update iocb to delete an rx sa_index
+ */
+int
+qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
+{
+ srb_t *sp;
+ fc_port_t *fcport = NULL;
+ struct srb_iocb *iocb_cmd = NULL;
+ int rval = QLA_SUCCESS;
+ struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
+ uint16_t nport_handle = e->u.sa_update.nport_handle;
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);
+
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "sa_ctl allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fcport = sa_ctl->fcport;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3073,
+ "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
+ fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
+ /*
+ * if this is a sadb cleanup delete, mark it so the isr can
+ * take the correct action
+ */
+ if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
+ /* mark this srb as a cleanup delete */
+ sp->flags |= SRB_EDIF_CLEANUP_DELETE;
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
+ }
+
+ sp->type = SRB_SA_REPLACE;
+ sp->name = "SA_REPLACE";
+ sp->fcport = fcport;
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla_noop_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+
+ if (rval != QLA_SUCCESS)
+ rval = QLA_FUNCTION_FAILED;
+
+ return rval;
+}
+
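+/*
+ * Build an SA update IOCB from the application-supplied sa_frame:
+ * translates the SAU_FLG_* request flags into SA_FLAG_* IOCB flags
+ * and copies the 128- or 256-bit key into the packet.
+ */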
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ int itr = 0;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_sa_update_frame *sa_frame =
+ &sp->u.iocb_cmd.u.sa_update.sa_frame;
+ u8 flags = 0;
+
+ switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_INVALIDATE;
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX;
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
+ break;
+ }
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+ sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ sa_update_iocb->flags = flags;
+ sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
+ sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
+ sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
+
+ sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
+ if (sp->fcport->edif.aes_gmac)
+ sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
+
+ if (sa_frame->flags & SAU_FLG_KEY256) {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY256;
+ for (itr = 0; itr < 32; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ } else {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY128;
+ for (itr = 0; itr < 16; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
+ sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
+ sp->fcport->edif.aes_gmac);
+
+ if (sa_frame->flags & SAU_FLG_TX)
+ sp->fcport->edif.tx_sa_pending = 1;
+ else
+ sp->fcport->edif.rx_sa_pending = 1;
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
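+/*
+ * Build an SA update IOCB that only invalidates an existing sa_index;
+ * used by the sa_replace work item for sadb cleanup deletes.
+ */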
+void
+qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ struct scsi_qla_host *vha = sp->vha;
+ struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
+ struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
+ uint16_t nport_handle = sp->fcport->loop_id;
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+
+ sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
+
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ /* Invalidate the index. salt, spi, control & key are ignored */
+ sa_update_iocb->flags = SA_FLAG_INVALIDATE;
+ sa_update_iocb->salt = 0;
+ sa_update_iocb->spi = 0;
+ sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
+ sa_update_iocb->sa_control = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
+ sa_update_iocb->sa_index, sp->handle);
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
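+/*
+ * Handle an AUTH ELS received via PUREX: validate the frame length and
+ * eDIF state, stash the frame on the enode list for the app to pull,
+ * and ring the doorbell; every failure path rejects the exchange with
+ * an LS_RJT and consumes the iocb.
+ */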
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
+{
+ struct purex_entry_24xx *p = *pkt;
+ struct enode *ptr;
+ int sid;
+ u16 totlen;
+ struct purexevent *purex;
+ struct scsi_qla_host *host = NULL;
+ int rc;
+ struct fc_port *fcport;
+ struct qla_els_pt_arg a;
+ be_id_t beid;
+
+ memset(&a, 0, sizeof(a));
+
+ a.els_opcode = ELS_AUTH_ELS;
+ a.nport_handle = p->nport_handle;
+ a.rx_xchg_address = p->rx_xchg_addr;
+ a.did.b.domain = p->s_id[2];
+ a.did.b.area = p->s_id[1];
+ a.did.b.al_pa = p->s_id[0];
+ a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
+ a.tx_addr = vha->hw->elsrej.cdma;
+ a.vp_idx = vha->vp_idx;
+ a.control_flags = EPD_ELS_RJT;
+
+ sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
+
+ totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
+ if (le16_to_cpu(p->status_flags) & 0x8000) {
+ totlen = le16_to_cpu(p->trunc_frame_size);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (totlen > MAX_PAYLOAD) {
+ ql_dbg(ql_dbg_edif, vha, 0x0910d,
+ "%s WARNING: verbose ELS frame received (totlen=%x)\n",
+ __func__, totlen);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif support not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
+ __func__);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ ptr = qla_enode_alloc(vha, N_PUREX);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x09109,
+ "WARNING: enode alloc failed for sid=%x\n",
+ sid);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ purex = &ptr->u.purexinfo;
+ purex->pur_info.pur_sid = a.did;
+ purex->pur_info.pur_pend = 0;
+ purex->pur_info.pur_bytes_rcvd = totlen;
+ purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
+ purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
+ purex->pur_info.pur_did.b.domain = p->d_id[2];
+ purex->pur_info.pur_did.b.area = p->d_id[1];
+ purex->pur_info.pur_did.b.al_pa = p->d_id[0];
+ purex->pur_info.vp_idx = p->vp_idx;
+
+ rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
+ purex->msgp_len);
+ if (rc) {
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+ beid.al_pa = purex->pur_info.pur_did.b.al_pa;
+ beid.area = purex->pur_info.pur_did.b.area;
+ beid.domain = purex->pur_info.pur_did.b.domain;
+ host = qla_find_host_by_d_id(vha, beid);
+ if (!host) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "%s Drop ELS due to unable to find host %06x\n",
+ __func__, purex->pur_info.pur_did.b24);
+
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+
+ fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
+
+ if (host->e_dbell.db_flags != EDB_ACTIVE ||
+ (fcport && EDIF_SESSION_DOWN(fcport))) {
+ ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
+ __func__, host->e_dbell.db_flags,
+ fcport ? fcport->d_id.b24 : 0);
+
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ return;
+ }
+
+ /* add the local enode to the list */
+ qla_enode_add(host, ptr);
+
+ ql_dbg(ql_dbg_edif, host, 0x0910c,
+ "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
+ __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
+ purex->pur_info.pur_did.b24, p->rx_xchg_addr);
+
+ qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
+}
+
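+/*
+ * Pull the first free sa_index from the rx or tx free-pool bitmap;
+ * tx indexes are offset by EDIF_TX_SA_INDEX_BASE.
+ */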
+static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ void *sa_id_map;
+ unsigned long flags = 0;
+ u16 sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_id_map = ha->edif_tx_sa_id_map;
+ else
+ sa_id_map = ha->edif_rx_sa_id_map;
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
+ if (sa_index >= EDIF_NUM_SA_INDEX) {
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ return INVALID_EDIF_SA_INDEX;
+ }
+ set_bit(sa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+
+ if (dir)
+ sa_index += EDIF_TX_SA_INDEX_BASE;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index retrieved from free pool %d\n", __func__, sa_index);
+
+ return sa_index;
+}
+
+/* find an sadb entry for an nport_handle */
+static struct edif_sa_index_entry *
+qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list)
+{
+ struct edif_sa_index_entry *entry;
+ struct edif_sa_index_entry *tentry;
+ struct list_head *indx_list = sa_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == nport_handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* remove an sa_index from the nport_handle and return it to the free pool */
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ int slot = 0;
+ int free_slot_count = 0;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: no entry found for nport_handle 0x%x\n",
+ __func__, nport_handle);
+ return -1;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ /*
+ * each tx/rx direction has up to 2 sa_index slots: one for in-flight
+ * traffic, the other is used at re-key time.
+ */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == sa_index) {
+ entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
+ entry->sa_pair[slot].spi = 0;
+ free_slot_count++;
+ qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
+ } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot_count++;
+ }
+ }
+
+ if (free_slot_count == 2) {
+ list_del(&entry->next);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_index %d removed, free_slot_count: %d\n",
+ __func__, sa_index, free_slot_count);
+
+ return 0;
+}
+
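+/*
+ * Process an SA update response IOCB: stop any pending rx-delete timer,
+ * post SAUPDATE_COMPL doorbell events to the app, and release the
+ * sa_ctl/sa_index on deletes or failures.
+ */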
+void
+qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
+ struct sa_update_28xx *pkt)
+{
+ const char *func = "SA_UPDATE_RESPONSE_IOCB";
+ srb_t *sp;
+ struct edif_sa_ctl *sa_ctl;
+ int old_sa_deleted = 1;
+ uint16_t nport_handle;
+ struct scsi_qla_host *vha;
+
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
+
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, v, 0x3063,
+ "%s: no sp found for pkt\n", __func__);
+ return;
+ }
+ /* use sp->vha due to npiv */
+ vha = sp->vha;
+
+ switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ }
+
+ /*
+ * dig the nport handle out of the iocb; fcport->loop_id cannot be
+ * trusted to be correct during cleanup sa_update iocbs.
+ */
+ nport_handle = sp->fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
+ __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
+ nport_handle, pkt->sa_index, pkt->flags, sp->handle);
+
+ /* if rx delete, remove the timer */
+ if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
+ struct edif_list_entry *edif_entry;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: removing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+ qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
+ del_timer(&edif_entry->timer);
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+
+ /*
+ * if this is a delete for either tx or rx, make sure it succeeded.
+ * The new_sa_info field should be 0xffff on success
+ */
+ if (pkt->flags & SA_FLAG_INVALIDATE)
+ old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
+
+ /* Process update and delete the same way */
+
+ /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
+ if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: nph 0x%x, sa_index %d removed from fw\n",
+ __func__, sp->fcport->loop_id, pkt->sa_index);
+
+ } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
+ old_sa_deleted) {
+ /*
+ * Note: we are only keeping track of the latest SA, so we know
+ * when we can start enabling encryption per I/O.
+ * If all SAs get deleted, let FW reject the IOCB.
+ *
+ * TODO: edif: enable should probably not be set here;
+ * TODO: edif: PRLI complete is where it should be set.
+ */
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "SA(%x)updated for s_id %02x%02x%02x\n",
+ pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+ sp->fcport->edif.enable = 1;
+ if (pkt->flags & SA_FLAG_TX) {
+ sp->fcport->edif.tx_sa_set = 1;
+ sp->fcport->edif.tx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ } else {
+ sp->fcport->edif.rx_sa_set = 1;
+ sp->fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
+ __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+
+ if (pkt->flags & SA_FLAG_TX)
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ else
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+
+ /* for delete, release sa_ctl, sa_index */
+ if (pkt->flags & SA_FLAG_INVALIDATE) {
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
+ le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
+ (pkt->flags & SA_FLAG_TX)) != NULL) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n",
+ __func__, sa_ctl);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, le16_to_cpu(pkt->sa_index), nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ /*
+ * check for a failed sa_update and remove
+ * the sadb entry.
+ */
+ } else if (pkt->u.comp_sts) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, pkt->sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ switch (le16_to_cpu(pkt->u.comp_sts)) {
+ case CS_PORT_EDIF_UNAVAIL:
+ case CS_PORT_EDIF_LOGOUT:
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ break;
+ default:
+ break;
+ }
+ }
+
+ sp->done(sp, 0);
+}
+
+/**
+ * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Return: non-zero if a failure occurred, else zero.
+ */
+int
+qla28xx_start_scsi_edif(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index, i;
+ uint32_t handle;
+ uint16_t cnt;
+ int16_t req_cnt;
+ uint16_t tot_dsds;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_6 *cmd_pkt;
+ struct dsd64 *cur_dsd;
+ uint8_t avail_dsds = 0;
+ struct scatterlist *sg;
+ struct req_que *req = sp->qpair->req;
+ spinlock_t *lock = sp->qpair->qp_lock_ptr;
+
+ /* Setup device pointers. */
+ cmd = GET_CMD_SP(sp);
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ rd_reg_dword(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = sp->u.scmd.ct6_ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /*
+ * SCSI command bigger than 16 bytes must be
+ * multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /*
+ * Zero out remaining portion of packet.
+ * tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = cpu_to_le32(0);
+ goto no_dsds;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
+ }
+
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+ cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
+
+ /* One DSD is available in the Command Type 6 IOCB */
+ avail_dsds = 1;
+ cur_dsd = &cmd_pkt->fcp_dsd;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ put_unaligned_le64(sle_dma, &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
+ cur_dsd++;
+ avail_dsds--;
+ }
+
+no_dsds:
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ cmd_pkt->entry_type = COMMAND_TYPE_6;
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->entry_status = 0;
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Adjust ring index. */
+ wmb();
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->u.scmd.ct6_ctx) {
+ mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+ sp->u.scmd.ct6_ctx = NULL;
+ }
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+/**********************************************
+ * edif update/delete sa_index list functions *
+ **********************************************/
+
+/* clear the edif_indx_list for this port */
+void qla_edif_list_del(fc_port_t *fcport)
+{
+ struct edif_list_entry *indx_lst;
+ struct edif_list_entry *tindx_lst;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
+ list_del(&indx_lst->next);
+ kfree(indx_lst);
+ }
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+/******************
+ * SADB functions *
+ ******************/
+
+/* allocate/retrieve an sa_index for a given spi */
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ uint16_t sa_index;
+ int dir = sa_frame->flags & SAU_FLG_TX;
+ int slot = 0;
+ int free_slot = -1;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry fc_port: %p, nport_handle: 0x%x\n",
+ __func__, fcport, nport_handle);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: rx delete request with no entry\n", __func__);
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ }
+
+ /* if there is no entry for this nport, add one */
+ entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
+ if (!entry)
+ return INVALID_EDIF_SA_INDEX;
+
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ kfree(entry);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = nport_handle;
+ entry->fcport = fcport;
+ entry->sa_pair[0].spi = sa_frame->spi;
+ entry->sa_pair[0].sa_index = sa_index;
+ entry->sa_pair[1].spi = 0;
+ entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ list_add_tail(&entry->next, sa_list);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
+ __func__, nport_handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ /* see if we already have an entry for this spi */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot = slot;
+ } else {
+ if (entry->sa_pair[slot].spi == sa_frame->spi) {
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
+ __func__, slot, entry->handle, sa_frame->spi,
+ entry->sa_pair[slot].sa_index);
+ return entry->sa_pair[slot].sa_index;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ /* both slots are used */
+ if (free_slot == -1) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
+ __func__, entry->handle, sa_frame->spi);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
+ __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
+ entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
+
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ /* there is at least one free slot, use it */
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: empty freepool!!\n", __func__);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ entry->sa_pair[free_slot].spi = sa_frame->spi;
+ entry->sa_pair[free_slot].sa_index = sa_index;
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
+ __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+}
+
+/* release any sadb entries -- only done at teardown */
+void qla_edif_sadb_release(struct qla_hw_data *ha)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+ struct edif_sa_index_entry *entry;
+
+ list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
+ entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_del(&entry->next);
+ kfree(entry);
+ }
+
+ list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
+ entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_del(&entry->next);
+ kfree(entry);
+ }
+}
+
+/**************************
+ * sadb freepool functions
+ **************************/
+
+/* build the rx and tx sa_index free pools -- only done at fcport init */
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
+{
+ ha->edif_tx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+
+ if (!ha->edif_tx_sa_id_map) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb tx.\n");
+ return -ENOMEM;
+ }
+
+ ha->edif_rx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+ if (!ha->edif_rx_sa_id_map) {
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb rx.\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* release the free pool - only done during fcport teardown */
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
+{
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ kfree(ha->edif_rx_sa_id_map);
+ ha->edif_rx_sa_id_map = NULL;
+}
+
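+/*
+ * Common rx-delete filter: once enough transfers have completed on the
+ * new sa_index, schedule the deferred delete of the old rx sa_index.
+ */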
+static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t cached_nport_handle;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
+ if (!edif_entry) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return; /* no pending delete for this handle */
+ }
+
+ /*
+ * bail out if there is no pending delete for this index or the
+ * iocb does not match the rx sa_index
+ */
+ if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
+ edif_entry->update_sa_index != sa_index) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ /*
+ * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
+ * transfers before queueing the RX delete
+ */
+ if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
+
+ delete_sa_index = edif_entry->delete_sa_index;
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ cached_nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ /* sanity check on the nport handle */
+ if (nport_handle != cached_nport_handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
+ __func__, nport_handle, cached_nport_handle);
+ }
+
+ /* find the sa_ctl for the delete and schedule the delete */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
+ __func__, sa_ctl, sa_index);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
+ delete_sa_index,
+ edif_entry->update_sa_index, nport_handle, handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
+ __func__, delete_sa_index);
+ }
+}
+
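+/* initiator path: check for a pending rx sa delete on a scsi read status */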
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ fc_port_t *fcport = sp->fcport;
+ /* sa_index used by this iocb */
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint32_t handle;
+
+ handle = (uint32_t)LSW(sts24->handle);
+
+ /* find out if this status iocb is for a scsi read */
+ if (cmd->sc_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
+ le16_to_cpu(sts24->edif_sa_index));
+}
+
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *pkt)
+{
+ __chk_edif_rx_sa_delete_pending(vha, fcport,
+ pkt->handle, le16_to_cpu(pkt->edif_sa_index));
+}
+
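+/* fill the ELS passthrough args from the bsg auth request */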
+static void qla_parse_auth_els_ctl(struct srb *sp)
+{
+ struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
+ struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
+ struct fc_bsg_request *request = bsg_job->request;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ a->tx_len = a->tx_byte_count = sp->remap.req.len;
+ a->tx_addr = sp->remap.req.dma;
+ a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
+ a->rx_addr = sp->remap.rsp.dma;
+
+ if (p->e.sub_cmd == SEND_ELS_REPLY) {
+ a->control_flags = p->e.extra_control_flags << 13;
+ a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
+ if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
+ a->els_opcode = ELS_LS_ACC;
+ else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
+ a->els_opcode = ELS_LS_RJT;
+ }
+ a->did = sp->fcport->d_id;
+ a->els_opcode = request->rqst_data.h_els.command_code;
+ a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ a->vp_idx = sp->vha->vp_idx;
+}
+
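+/*
+ * Send an AUTH ELS on behalf of the app via the bsg passthrough path,
+ * without requiring a login (SRB_ELS_CMD_HST_NOLOGIN).
+ */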
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int rval = (DID_ERROR << 16);
+ port_id_t d_id;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
+ d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
+ d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
+
+ /* find matching d_id in fcport list */
+ fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x911a,
+ "%s fcport not find online portid=%06x.\n",
+ __func__, d_id.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
+ if (qla_bsg_check(vha, bsg_job, fcport))
+ return 0;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x910d,
+ "%s ELS code %x, no loop id.\n", __func__,
+ bsg_request->rqst_data.r_els.els_code);
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ return -ENXIO;
+ }
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EIO;
+ goto done;
+ }
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EPERM;
+ goto done;
+ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_user, vha, 0x7004,
+ "Failed get sp pid=%06x\n", fcport->d_id.b24);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ sp->remap.req.len = bsg_job->request_payload.payload_len;
+ sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.req.dma);
+ if (!sp->remap.req.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7005,
+ "Failed allocate request dma len=%x\n",
+ bsg_job->request_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_sp;
+ }
+
+ sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
+ sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.rsp.dma);
+ if (!sp->remap.rsp.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Failed allocate response dma len=%x\n",
+ bsg_job->reply_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_remap_req;
+ }
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
+ sp->remap.req.len);
+ sp->remap.remapped = true;
+
+ sp->type = SRB_ELS_CMD_HST_NOLOGIN;
+ sp->name = "SPCN_BSG_HST_NOLOGIN";
+ sp->u.bsg_cmd.bsg_job = bsg_job;
+ qla_parse_auth_els_ctl(sp);
+
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ rval = qla2x00_start_sp(sp);
+
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ rval = -EIO;
+ goto done_free_remap_rsp;
+ }
+ return rval;
+
+done_free_remap_rsp:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+done_free_remap_req:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+done_free_sp:
+ qla2x00_rel_sp(sp);
+
+done:
+ return rval;
+}
+
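+/*
+ * Notify the authentication app that an eDIF session has gone down by
+ * posting a SESSION_SHUTDOWN doorbell event and a port-offline AEN.
+ */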
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
+{
+ if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN send port_offline event\n",
+ __func__, sess->port_name);
+ sess->edif.app_sess_online = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
+ sess->d_id.b24, 0, sess);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
new file mode 100644
index 000000000000..9e8f28d0caa1
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#ifndef __QLA_EDIF_H
+#define __QLA_EDIF_H
+
+struct qla_scsi_host;
+#define EDIF_APP_ID 0x73730001
+
+#define EDIF_MAX_INDEX 2048
+struct edif_sa_ctl {
+ struct list_head next;
+ uint16_t del_index;
+ uint16_t index;
+ uint16_t slot;
+ uint16_t flags;
+#define EDIF_SA_CTL_FLG_REPL BIT_0
+#define EDIF_SA_CTL_FLG_DEL BIT_1
+#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4
+ // Invalidate-index bit; mirrors QLA_SA_UPDATE_FLAGS_DELETE
+ unsigned long state;
+#define EDIF_SA_CTL_USED 1 /* Active Sa update */
+#define EDIF_SA_CTL_PEND 2 /* Waiting for slot */
+#define EDIF_SA_CTL_REPL 3 /* Active Replace and Delete */
+#define EDIF_SA_CTL_DEL 4 /* Delete Pending */
+ struct fc_port *fcport;
+ struct bsg_job *bsg_job;
+ struct qla_sa_update_frame sa_frame;
+};
+
+enum enode_flags_t {
+ ENODE_ACTIVE = 0x1,
+};
+
+struct pur_core {
+ enum enode_flags_t enode_flags;
+ spinlock_t pur_lock;
+ struct list_head head;
+};
+
+enum db_flags_t {
+ EDB_ACTIVE = 0x1,
+};
+
+struct edif_dbell {
+ enum db_flags_t db_flags;
+ spinlock_t db_lock;
+ struct list_head head;
+ struct completion dbell;
+};
+
+#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
+struct sa_update_28xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* IOCB System handle. */
+
+ union {
+ __le16 nport_handle; /* in: N_PORT handle. */
+ __le16 comp_sts; /* out: completion status */
+#define CS_PORT_EDIF_UNAVAIL 0x28
+#define CS_PORT_EDIF_LOGOUT 0x29
+#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64
+#define CS_PORT_EDIF_INV_REQ 0x66
+ } u;
+ uint8_t vp_index;
+ uint8_t reserved_1;
+ uint8_t port_id[3];
+ uint8_t flags;
+#define SA_FLAG_INVALIDATE BIT_0
+#define SA_FLAG_TX BIT_1 // 1=tx, 0=rx
+
+ uint8_t sa_key[32]; /* 256 bit key */
+ __le32 salt;
+ __le32 spi;
+ uint8_t sa_control;
+#define SA_CNTL_ENC_FCSP (1 << 3)
+#define SA_CNTL_ENC_OPD (2 << 3)
+#define SA_CNTL_ENC_MSK (3 << 3) // mask bits 4,3
+#define SA_CNTL_AES_GMAC (1 << 2)
+#define SA_CNTL_KEY256 (2 << 0)
+#define SA_CNTL_KEY128 0
+
+ uint8_t reserved_2;
+ __le16 sa_index; // bits 11-15 reserved
+ __le16 old_sa_info;
+ __le16 new_sa_info;
+};
+
+#define NUM_ENTRIES 256
+#define MAX_PAYLOAD 1024
+#define PUR_GET 1
+
+struct dinfo {
+ int nodecnt;
+ int lstate;
+};
+
+struct pur_ninfo {
+ unsigned int pur_pend:1;
+ port_id_t pur_sid;
+ port_id_t pur_did;
+ uint8_t vp_idx;
+ short pur_bytes_rcvd;
+ unsigned short pur_nphdl;
+ unsigned int pur_rx_xchg_address;
+};
+
+struct purexevent {
+ struct pur_ninfo pur_info;
+ unsigned char *msgp;
+ u32 msgp_len;
+};
+
+#define N_UNDEF 0
+#define N_PUREX 1
+struct enode {
+ struct list_head list;
+ struct dinfo dinfo;
+ uint32_t ntype;
+ union {
+ struct purexevent purexinfo;
+ } u;
+};
+
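+/*
+ * A session counts as down when initiator mode is enabled and the
+ * session is deleting/deleted or the app has marked it offline.
+ */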
+#define EDIF_SESSION_DOWN(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED || \
+ !_s->edif.app_sess_online))
+
+#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
new file mode 100644
index 000000000000..58b718d35d19
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (C) 2018- Marvell
+ *
+ */
+#ifndef __QLA_EDIF_BSG_H
+#define __QLA_EDIF_BSG_H
+
+/* BSG Vendor specific commands */
+#define ELS_MAX_PAYLOAD 1024
+#ifndef WWN_SIZE
+#define WWN_SIZE 8
+#endif
+#define VND_CMD_APP_RESERVED_SIZE 32
+
+enum auth_els_sub_cmd {
+ SEND_ELS = 0,
+ SEND_ELS_REPLY,
+ PULL_ELS,
+};
+
+struct extra_auth_els {
+ enum auth_els_sub_cmd sub_cmd;
+ uint32_t extra_rx_xchg_address;
+ uint8_t extra_control_flags;
+#define BSG_CTL_FLAG_INIT 0
+#define BSG_CTL_FLAG_LS_ACC 1
+#define BSG_CTL_FLAG_LS_RJT 2
+#define BSG_CTL_FLAG_TRM 3
+ uint8_t extra_rsvd[3];
+} __packed;
+
+struct qla_bsg_auth_els_request {
+ struct fc_bsg_request r;
+ struct extra_auth_els e;
+};
+
+struct qla_bsg_auth_els_reply {
+ struct fc_bsg_reply r;
+ uint32_t rx_xchg_address;
+};
+
+struct app_id {
+ int app_vid;
+ uint8_t app_key[32];
+} __packed;
+
+struct app_start_reply {
+ uint32_t host_support_edif;
+ uint32_t edif_enode_active;
+ uint32_t edif_edb_active;
+ uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_start {
+ struct app_id app_info;
+ uint32_t prli_to;
+ uint32_t key_shred;
+ uint8_t app_start_flags;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+} __packed;
+
+struct app_stop {
+ struct app_id app_info;
+ char buf[16];
+} __packed;
+
+struct app_plogi_reply {
+ uint32_t prli_status;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RECFG_TIME 1
+#define RECFG_BYTES 2
+
+struct app_rekey_cfg {
+ struct app_id app_info;
+ uint8_t rekey_mode;
+ port_id_t d_id;
+ uint8_t force;
+ union {
+ int64_t bytes;
+ int64_t time;
+ } rky_units;
+
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ port_id_t remote_pid;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo {
+ port_id_t remote_pid;
+ uint8_t remote_wwpn[WWN_SIZE];
+ uint8_t remote_type;
+#define VND_CMD_RTYPE_UNKNOWN 0
+#define VND_CMD_RTYPE_TARGET 1
+#define VND_CMD_RTYPE_INITIATOR 2
+ uint8_t remote_state;
+ uint8_t auth_state;
+ uint8_t rekey_mode;
+ int64_t rekey_count;
+ int64_t rekey_config_value;
+ int64_t rekey_consumed_value;
+
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+/* AUTH States */
+#define VND_CMD_AUTH_STATE_UNDEF 0
+#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN 1
+#define VND_CMD_AUTH_STATE_NEEDED 2
+#define VND_CMD_AUTH_STATE_ELS_RCVD 3
+#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4
+
+struct app_pinfo_reply {
+ uint8_t port_count;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ struct app_pinfo ports[0];
+} __packed;
+
+struct app_sinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_sinfo {
+ uint8_t remote_wwpn[WWN_SIZE];
+ int64_t rekey_count;
+ uint8_t rekey_mode;
+ int64_t tx_bytes;
+ int64_t rx_bytes;
+} __packed;
+
+struct app_stats_reply {
+ uint8_t elem_count;
+ struct app_sinfo elem[0];
+} __packed;
+
+struct qla_sa_update_frame {
+ struct app_id app_info;
+ uint16_t flags;
+#define SAU_FLG_INV 0x01 /* delete key */
+#define SAU_FLG_TX 0x02 /* 1=tx, 0 = rx */
+#define SAU_FLG_FORCE_DELETE 0x08
+#define SAU_FLG_GMAC_MODE 0x20 /*
+ * GMAC mode is cleartext for the IO
+ * (i.e. NULL encryption)
+ */
+#define SAU_FLG_KEY128 0x40
+#define SAU_FLG_KEY256 0x80
+ uint16_t fast_sa_index:10,
+ reserved:6;
+ uint32_t salt;
+ uint32_t spi;
+ uint8_t sa_key[32];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t port_id;
+} __packed;
+
+// used for edif mgmt bsg interface
+#define QL_VND_SC_UNDEF 0
+#define QL_VND_SC_SA_UPDATE 1
+#define QL_VND_SC_APP_START 2
+#define QL_VND_SC_APP_STOP 3
+#define QL_VND_SC_AUTH_OK 4
+#define QL_VND_SC_AUTH_FAIL 5
+#define QL_VND_SC_REKEY_CONFIG 6
+#define QL_VND_SC_GET_FCINFO 7
+#define QL_VND_SC_GET_STATS 8
+
+/* Application interface data structure for rtn data */
+#define EXT_DEF_EVENT_DATA_SIZE 64
+struct edif_app_dbell {
+ uint32_t event_code;
+ uint32_t event_data_size;
+ union {
+ port_id_t port_id;
+ uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE];
+ };
+} __packed;
+
+struct edif_sa_update_aen {
+ port_id_t port_id;
+ uint32_t key_type; /* Tx (1) or RX (2) */
+ uint32_t status; /* 0 success, 1 failed, 2 timeout, 3 error */
+ uint8_t reserved[16];
+} __packed;
+
+#define QL_VND_SA_STAT_SUCCESS 0
+#define QL_VND_SA_STAT_FAILED 1
+#define QL_VND_SA_STAT_TIMEOUT 2
+#define QL_VND_SA_STAT_ERROR 3
+
+#define QL_VND_RX_SA_KEY 1
+#define QL_VND_TX_SA_KEY 2
+
+/* App defines for PLOGI auth'd-ok and PLOGI auth-bad requests */
+struct auth_complete_cmd {
+ struct app_id app_info;
+#define PL_TYPE_WWPN 1
+#define PL_TYPE_DID 2
+ uint32_t type;
+ union {
+ uint8_t wwpn[WWN_SIZE];
+ port_id_t d_id;
+ } u;
+ uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RX_DELAY_DELETE_TIMEOUT 20
+
+#endif /* QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 49df418030e4..073d06e88c58 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -82,10 +82,11 @@ struct port_database_24xx {
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint8_t reserved_3[4];
+ uint8_t reserved_3[2];
+ uint16_t nvme_first_burst_size;
uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */
uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */
- uint16_t nvme_first_burst_size;
+ uint8_t secure_login;
uint8_t reserved_4[14];
};
@@ -489,6 +490,9 @@ struct cmd_type_6 {
struct scsi_lun lun; /* FCP LUN (BE). */
__le16 control_flags; /* Control flags. */
+#define CF_NEW_SA BIT_12
+#define CF_EN_EDIF BIT_9
+#define CF_ADDITIONAL_PARAM_BLK BIT_8
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
@@ -611,6 +615,7 @@ struct sts_entry_24xx {
union {
__le16 reserved_1;
__le16 nvme_rsp_pyld_len;
+ __le16 edif_sa_index; /* edif sa_index used for initiator read data */
};
__le16 state_flags; /* State flags. */
@@ -805,6 +810,7 @@ struct els_entry_24xx {
#define EPD_RX_XCHG (3 << 13)
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
+#define ECF_SEC_LOGIN BIT_3
union {
struct {
@@ -896,6 +902,7 @@ struct logio_entry_24xx {
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
+#define LCF_COMMON_FEAT BIT_7 /* PLOGI - Set Common Features Field */
#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */
#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
@@ -920,6 +927,8 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
__le32 io_parameter[11]; /* General I/O parameters. */
+#define LIO_COMM_FEAT_FCSP BIT_21
+#define LIO_COMM_FEAT_CIO BIT_31
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2f867da822ae..1c3f055d41b8 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -12,6 +12,7 @@
* Global Function Prototypes in qla_init.c source file.
*/
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
@@ -130,6 +131,18 @@ void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
void qla_init_iocb_limit(scsi_qla_host_t *);
+void qla_edif_list_del(fc_port_t *fcport);
+void qla_edif_sadb_release(struct qla_hw_data *ha);
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha);
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha);
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24);
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *ctio);
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+const char *sc_to_str(uint16_t cmd);
/*
* Global Data in qla_os.c source file.
@@ -175,6 +188,7 @@ extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
+extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
@@ -236,6 +250,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
struct purex_item *pkt);
void qla_pci_set_eeh_busy(struct scsi_qla_host *);
void qla_schedule_eeh_work(struct scsi_qla_host *);
+struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
+ int index, int dir);
/*
* Global Functions in qla_mid.c source file.
@@ -280,7 +296,10 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
-
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *pkt, struct qla_els_pt_arg *a);
+cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha,
+ struct req_que *que);
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -310,6 +329,8 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
+ struct qla_work_evt *e);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -578,6 +599,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
+void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -640,6 +662,8 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, u8 *buf, u32 buf_len);
/*
* Global Function Prototypes in qla_dbg.c source file.
@@ -879,6 +903,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+extern int qla24xx_sadb_update(struct bsg_job *bsg_job);
+extern int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl);
/* 83xx related functions */
void qla83xx_fw_dump(scsi_qla_host_t *vha);
@@ -923,6 +950,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
response_t *);
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
@@ -935,7 +963,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
-void qlt_remove_target_resources(struct qla_hw_data *);
+void qla_remove_hostmap(struct qla_hw_data *ha);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
@@ -950,6 +978,25 @@ extern void qla_nvme_abort_process_comp_status
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+
+/* qla_edif.c */
+fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
+void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
+ fc_port_t *fcport);
+void qla_edb_stop(scsi_qla_host_t *vha);
+ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
+int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
+void qla_enode_init(scsi_qla_host_t *vha);
+void qla_enode_stop(scsi_qla_host_t *vha);
+void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport);
+void qla_edb_init(scsi_qla_host_t *vha);
+void qla_edif_timer(scsi_qla_host_t *vha);
+int qla28xx_start_scsi_edif(srb_t *sp);
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp);
+void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sa_update_28xx *pkt);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#define QLA2XX_HW_ERROR BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5b6e04a91a18..ebc8fdb0b43d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -632,7 +632,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
- if (vha->flags.nvme_enabled)
+ if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
@@ -1730,8 +1730,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
size += alen;
ql_dbg(ql_dbg_disc, vha, 0x20a8,
"FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
- if (callopt == CALLOPT_FDMI1)
- goto done;
/* OS Name and Version */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
@@ -1754,6 +1752,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
size += alen;
ql_dbg(ql_dbg_disc, vha, 0x20a9,
"OS VERSION = %s.\n", eiter->a.os_version);
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
@@ -2826,6 +2826,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (fcport->disc_state == DSC_DELETE_PEND)
return;
+ /* We will figure out what happened after AUTH completes */
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+ return;
+
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
ql_dbg(ql_dbg_disc, vha, 0x20d3,
@@ -3498,7 +3502,16 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
continue;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->fc4_type = rp->fc4type;
found = true;
+
+ if (fcport->scan_needed) {
+ if (NVME_PRIORITY(vha->hw, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
/*
* If device was not a fabric device before.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f8f471157109..1e4e3e83b5c7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -34,7 +34,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
@@ -158,7 +157,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
GFP_ATOMIC);
if (!sp)
- return rval;
+ return QLA_MEMORY_ALLOC_FAILED;
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
@@ -191,7 +190,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ QLA_SUCCESS : QLA_ERR_FROM_FW;
sp->free(sp);
}
@@ -293,22 +292,6 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res)
sp->free(sp);
}
-static inline bool
-fcport_is_smaller(fc_port_t *fcport)
-{
- if (wwn_to_u64(fcport->port_name) <
- wwn_to_u64(fcport->vha->port_name))
- return true;
- else
- return false;
-}
-
-static inline bool
-fcport_is_bigger(fc_port_t *fcport)
-{
- return !fcport_is_smaller(fcport);
-}
-
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -343,19 +326,28 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
- else
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ } else {
+ if (vha->hw->flags.edif_enabled &&
+ vha->e_dbell.db_flags & EDB_ACTIVE) {
+ lio->u.logio.flags |=
+ (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ }
+ }
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
- ql_log(ql_log_warn, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n",
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
+ fcport->d_id.b24, fcport->login_retry);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -378,7 +370,7 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
- qlt_logo_completion_handler(sp->fcport, res);
+ qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
sp->free(sp);
}
@@ -404,10 +396,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_logout_sp_done;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->port_name);
+ fcport->port_name, fcport->explicit_logout);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -692,11 +684,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc,
fcport->login_gen, fcport->last_login_gen,
- fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -810,7 +802,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
default:
switch (current_login_state) {
case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ ql_dbg(ql_dbg_disc,
vha, 0x20e4, "%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
@@ -822,6 +814,13 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla2x00_post_async_adisc_work(vha, fcport,
data);
break;
+ case DSC_LS_PLOGI_COMP:
+ if (vha->hw->flags.edif_enabled) {
+ /* check to see if App supports Secure */
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ break;
+ }
+ fallthrough;
case DSC_LS_PORT_UNAVAIL:
default:
if (fcport->loop_id == FC_NO_LOOP_ID) {
@@ -849,6 +848,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
*/
qla2x00_set_fcport_disc_state(fcport,
DSC_DELETED);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
case DSC_LS_PRLI_COMP:
if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -861,6 +861,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
data);
break;
case DSC_LS_PLOGI_COMP:
+ if (vha->hw->flags.edif_enabled &&
+ vha->e_dbell.db_flags & EDB_ACTIVE) {
+ /* check to see if App supports secure or not */
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ break;
+ }
if (fcport_is_bigger(fcport)) {
/* local adapter is smaller */
if (fcport->loop_id != FC_NO_LOOP_ID)
@@ -1191,7 +1197,7 @@ done:
sp->free(sp);
}
-static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -1214,7 +1220,7 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x2129,
- "%s %8phC res %d \n", __func__,
+ "%s %8phC res %x\n", __func__,
sp->fcport->port_name, res);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -1227,6 +1233,8 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
+ if (res == QLA_OS_TIMER_EXPIRED)
+ ea.data[0] = QLA_OS_TIMER_EXPIRED;
qla24xx_handle_prli_done_event(vha, &ea);
}
@@ -1418,6 +1426,57 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
+static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rc = 0;
+
+ if (pd->secure_login) {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "Secure Login established on %8phC\n",
+ fcport->port_name);
+ fcport->flags |= FCF_FCSP_DEVICE;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "non-Secure Login %8phC",
+ fcport->port_name);
+ fcport->flags &= ~FCF_FCSP_DEVICE;
+ }
+ if (vha->hw->flags.edif_enabled) {
+ if (fcport->flags & FCF_FCSP_DEVICE) {
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
+ /* Start edif prli timer & ring doorbell for app */
+ fcport->edif.rx_sa_set = 0;
+ fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = 0;
+ fcport->edif.tx_sa_pending = 0;
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ fcport->d_id.b24);
+
+ if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ef,
+ "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->edif.app_started = 1;
+ fcport->edif.app_sess_online = 1;
+
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
+ fcport->d_id.b24, 0, fcport);
+ }
+
+ rc = 1;
+ } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
@@ -1431,12 +1490,15 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
fcport->port_name, fcport->disc_state, pd->current_login_state,
fcport->fc4_type, ea->rc);
- if (fcport->disc_state == DSC_DELETE_PEND)
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
+ __func__, __LINE__, fcport->port_name);
return;
+ }
if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
@@ -1453,6 +1515,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
} else if (ea->sp->gen1 != fcport->rscn_gen) {
qla_rscn_replay(fcport);
qlt_schedule_sess_for_deletion(fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
return;
}
@@ -1460,8 +1524,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
break;
- case PDS_PLOGI_PENDING:
case PDS_PLOGI_COMPLETE:
+ if (qla_chk_secure_login(vha, fcport, pd)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
+ return;
+ }
+ fallthrough;
+ case PDS_PLOGI_PENDING:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
/* Set discovery state back to GNL to Relogin attempt */
@@ -1470,6 +1540,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
@@ -1538,11 +1610,12 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 sec;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->login_gen, fcport->loop_id, fcport->scan_state);
+ fcport->login_gen, fcport->loop_id, fcport->scan_state,
+ fcport->fc4_type);
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
@@ -1715,6 +1788,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2115,
+ "Delaying session delete for FCP2 portid=%06x %8phC ",
+ fcport->d_id.b24, fcport->port_name);
+ return;
+ }
fcport->scan_needed = 1;
fcport->rscn_gen++;
}
@@ -1758,6 +1837,13 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
struct event_arg *ea)
{
+ if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
+ vha->hw->flags.edif_enabled) {
+ /* check to see if App supports Secure */
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ return;
+ }
+
/* for pure Target Mode, PRLI will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return;
@@ -1902,7 +1988,7 @@ qla24xx_async_abort_command(srb_t *sp)
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NOT_FOUND;
}
if (sp->type == SRB_FXIOCB_DCMD)
return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
@@ -1914,6 +2000,7 @@ qla24xx_async_abort_command(srb_t *sp)
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
+ struct srb *sp;
WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
ea->data[0]);
@@ -1941,22 +2028,27 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
+ sp = ea->sp;
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC priority %s, fc4type %x\n",
+ "%s %d %8phC priority %s, fc4type %x prev try %s\n",
__func__, __LINE__, ea->fcport->port_name,
vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
- "FCP" : "NVMe", ea->fcport->fc4_type);
+ "FCP" : "NVMe", ea->fcport->fc4_type,
+ (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
+ "NVME" : "FCP");
- if (N2N_TOPO(vha->hw)) {
- if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
- ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
- ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
- } else {
- ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
- ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
- }
+ if (NVME_FCP_TARGET(ea->fcport)) {
+ if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
+ ea->fcport->do_prli_nvme = 0;
+ else
+ ea->fcport->do_prli_nvme = 1;
+ } else {
+ ea->fcport->do_prli_nvme = 0;
+ }
- if (ea->fcport->n2n_link_reset_cnt < 3) {
+ if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt <
+ vha->hw->login_retry_count) {
ea->fcport->n2n_link_reset_cnt++;
vha->relogin_jif = jiffies + 2 * HZ;
/*
@@ -1964,6 +2056,7 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* state machine
*/
set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
} else {
ql_log(ql_log_warn, vha, 0x2119,
"%s %d %8phC Unable to reconnect\n",
@@ -1975,19 +2068,6 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* switch connect. login failed. Take connection down
* and allow relogin to retrigger
*/
- if (NVME_FCP_TARGET(ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post %s prli\n",
- __func__, __LINE__,
- ea->fcport->port_name,
- (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
- ? "NVMe" : "FCP");
- if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
- ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
- else
- ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
- }
-
ea->fcport->flags &= ~FCF_ASYNC_SENT;
ea->fcport->keep_nport_handle = 0;
ea->fcport->logout_on_delete = 1;
@@ -2053,26 +2133,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (NVME_TARGET(vha->hw, ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2117,
- "%s %d %8phC post prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_prli_work(vha, ea->fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id, ea->fcport->d_id.b24);
-
+ if (vha->hw->flags.edif_enabled) {
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
- ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ } else {
+ if (NVME_TARGET(vha->hw, fcport)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24);
+
+ set_bit(fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ fcport->logout_on_delete = 1;
+ fcport->send_els_logo = 0;
+ fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ }
}
break;
case MBS_COMMAND_ERROR:
@@ -3877,7 +3969,8 @@ enable_82xx_npiv:
}
/* Enable PUREX PASSTHRU */
- if (ql2xrdpenable || ha->flags.scm_supported_f)
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
qla25xx_set_els_cmds_supported(vha);
} else
goto failed;
@@ -4062,7 +4155,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
}
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio &&
+ if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
(IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
@@ -4081,16 +4174,30 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
qla_dual_mode_enabled(vha))
ha->fw_options[2] |= BIT_4;
else
- ha->fw_options[2] &= ~BIT_4;
+ ha->fw_options[2] &= ~(BIT_4);
/* Reserve 1/2 of emergency exchanges for ELS.*/
if (qla2xuseresexchforels)
ha->fw_options[2] |= BIT_8;
else
ha->fw_options[2] &= ~BIT_8;
+
+ /*
+ * N2N: set Secure=1 for PLOGI ACC;
+ * fw shall not send PRLI after PLOGI ACC
+ */
+ if (ha->flags.edif_enabled &&
+ vha->e_dbell.db_flags & EDB_ACTIVE) {
+ ha->fw_options[3] |= BIT_15;
+ ha->flags.n2n_fw_acc_sec = 1;
+ } else {
+ ha->fw_options[3] &= ~BIT_15;
+ ha->flags.n2n_fw_acc_sec = 0;
+ }
}
- if (ql2xrdpenable || ha->flags.scm_supported_f)
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
/* Enable Async 8130/8131 events -- transceiver insertion/removal */
@@ -4289,8 +4396,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
if (IS_QLAFX00(ha)) {
rval = qlafx00_init_firmware(vha, ha->init_cb_size);
goto next_check;
@@ -4299,6 +4404,12 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
+
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -4531,11 +4642,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* initialize */
ha->min_external_loopid = SNS_FIRST_LOOP_ID;
ha->operating_mode = LOOP;
- ha->switch_cap = 0;
switch (topo) {
case 0:
ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
+ ha->switch_cap = 0;
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -4549,6 +4660,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
case 2:
ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
+ ha->switch_cap = 0;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_N;
strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -4565,6 +4677,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
default:
ql_dbg(ql_dbg_disc, vha, 0x200f,
"HBA in unknown topology %x, using NL.\n", topo);
+ ha->switch_cap = 0;
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -4577,7 +4690,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!(topo == 2 && ha->flags.n2n_bigger))
+ if (vha->hw->flags.edif_enabled) {
+ if (topo != 2)
+ qlt_update_host_map(vha, id);
+ } else if (!(topo == 2 && ha->flags.n2n_bigger))
qlt_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5071,6 +5187,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
+ spin_lock_init(&fcport->edif.sa_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
+ INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
+
+ if (vha->e_dbell.db_flags == EDB_ACTIVE)
+ fcport->edif.app_started = 1;
+
+ spin_lock_init(&fcport->edif.indx_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
+
return fcport;
}
@@ -5084,8 +5210,13 @@ qla2x00_free_fcport(fc_port_t *fcport)
fcport->ct_desc.ct_sns = NULL;
}
+
+ qla_edif_flush_sa_ctl_lists(fcport);
list_del(&fcport->list);
qla2x00_clear_loop_id(fcport);
+
+ qla_edif_list_del(fcport);
+
kfree(fcport);
}
@@ -5204,6 +5335,16 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"LOOP READY.\n");
ha->flags.fw_init_done = 1;
+ if (ha->flags.edif_enabled &&
+ !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+ N2N_TOPO(vha->hw)) {
+ /*
+ * use the port-online event to wake up the app
+ * so it can get ready for authentication
+ */
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
+ }
+
/*
* Process any ATIO queue entries that came in
* while we weren't online.
@@ -5223,7 +5364,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"%s *** FAILED ***.\n", __func__);
} else {
ql_dbg(ql_dbg_disc, vha, 0x206b,
- "%s: exiting normally.\n", __func__);
+ "%s: exiting normally. local port wwpn %8phN id %06x)\n",
+ __func__, vha->port_name, vha->d_id.b24);
}
/* Restore state if a resync event occurred during processing */
@@ -5243,6 +5385,8 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
unsigned long flags;
fc_port_t *fcport;
+ ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
+
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -6459,13 +6603,13 @@ void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
- struct scsi_qla_host *vha;
+ struct scsi_qla_host *vha, *tvp;
struct qla_hw_data *ha = base_vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
/* Go with deferred removal of rport references. */
- list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
atomic_inc(&vha->vref_count);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->drport &&
@@ -6810,7 +6954,8 @@ void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
+ unsigned long flags;
ql_dbg(ql_dbg_dpc, vha, 0x401d,
"Quiescing I/O - ha=%p.\n", ha);
@@ -6819,8 +6964,18 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha);
- list_for_each_entry(vp, &ha->vp_list, list)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_mark_all_devices_lost(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
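
This inc-ref/unlock/work/relock pattern repeats for every vp_list walk the commit converts to list_for_each_entry_safe(). A generic sketch of the pattern, under the assumption that the refcount is what keeps the node alive across the unlocked window (names here are illustrative, not driver API):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct vport {
	struct list_head list;
	atomic_t vref_count;
};

/* Pin each node with a refcount before dropping the lock, so the
 * work callback may sleep without the node vanishing underneath it. */
static void for_each_vport_locked(spinlock_t *lock, struct list_head *head,
				  void (*work)(struct vport *))
{
	struct vport *vp, *tvp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(vp, tvp, head, list) {
		atomic_inc(&vp->vref_count);	/* pin vp across unlock */
		spin_unlock_irqrestore(lock, flags);

		work(vp);			/* may sleep */

		spin_lock_irqsave(lock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(lock, flags);
}
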
@@ -6835,7 +6990,7 @@ void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
unsigned long flags;
fc_port_t *fcport;
u16 i;
@@ -6903,7 +7058,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -6925,7 +7080,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
fcport->scan_state = 0;
}
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -6969,7 +7124,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
int rval;
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
struct req_que *req = ha->req_q_map[0];
unsigned long flags;
@@ -7125,7 +7280,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
qla2x00_configure_hba(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -8810,7 +8965,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
{
int status, rval;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
unsigned long flags;
status = qla2x00_init_rings(vha);
@@ -8882,7 +9037,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
"qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 82937c6bd9c4..5f3b7995cc8f 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -478,3 +478,19 @@ bool qla_pci_disconnected(struct scsi_qla_host *vha,
}
return ret;
}
+
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+ if (wwn_to_u64(fcport->port_name) <
+ wwn_to_u64(fcport->vha->port_name))
+ return true;
+ else
+ return false;
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+ return !fcport_is_smaller(fcport);
+}
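
Relocating these helpers makes the N2N tie-breaker reusable across qla_init.c and the new EDIF code: the port with the numerically smaller 64-bit WWPN acts as the PLOGI initiator. A standalone sketch of the comparison, with a plain big-endian load standing in for wwn_to_u64() (hypothetical names):

#include <stdint.h>

/* Big-endian load of an 8-byte WWPN -- the effect of wwn_to_u64(). */
static uint64_t wwn64(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

/* N2N tie-breaker: the port with the smaller WWPN initiates PLOGI. */
static int remote_is_smaller(const uint8_t remote[8], const uint8_t local[8])
{
	return wwn64(remote) < wwn64(local);
}
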
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d0ee843f6b04..9d4ad1d2b00a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -118,7 +118,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
-static inline cont_a64_entry_t *
+cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
@@ -145,7 +145,6 @@ inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We always use DIFF Bundling for best performance */
*fw_prot_opts = 0;
@@ -166,7 +165,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- if (guard & SHOST_DIX_GUARD_IP)
+ if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
else
*fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -176,6 +175,9 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
+ if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
+ *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
return scsi_prot_sg_count(cmd);
}
@@ -772,74 +774,19 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- switch (scsi_get_prot_type(cmd)) {
- case SCSI_PROT_DIF_TYPE0:
- /*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
- */
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
+ pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
+ if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
+ qla2x00_hba_err_chk_enabled(sp)) {
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
pkt->ref_tag_mask[2] = 0xff;
pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
- case SCSI_PROT_DIF_TYPE2:
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- /* enable ALL bytes of the ref tag */
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
- case SCSI_PROT_DIF_TYPE3:
- pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
- pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
- 0x00;
- break;
-
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
- case SCSI_PROT_DIF_TYPE1:
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- /* enable ALL bytes of the ref tag */
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
}
+
+ pkt->app_tag = cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
}
int
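
The rewrite collapses the old per-DIF-type switch: scsi_prot_ref_tag() supplies the right reference tag for every protection type, so the driver only decides whether to enable the ref-tag mask (ref check requested and HBA error checking on) and always clears the app tag. A hedged standalone sketch of that decision, with simplified stand-in types rather than the driver's fw_dif_context:

#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the fw_dif_context fields used above. */
struct dif_ctx_sketch {
	uint32_t ref_tag;
	uint8_t  ref_tag_mask[4];
	uint16_t app_tag;
	uint8_t  app_tag_mask[2];
};

/* Ref tag is always programmed; its mask only when ref checking is
 * both requested by the command and enabled on the HBA. App tag is
 * never checked, so it and its mask are always cleared. */
static void set_dif_tags(struct dif_ctx_sketch *pkt, uint32_t ref_tag,
			 int ref_check, int hba_err_chk)
{
	pkt->ref_tag = ref_tag;
	if (ref_check && hba_err_chk)
		memset(pkt->ref_tag_mask, 0xff, sizeof(pkt->ref_tag_mask));

	pkt->app_tag = 0;
	memset(pkt->app_tag_mask, 0, sizeof(pkt->app_tag_mask));
}
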
@@ -905,7 +852,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
memset(&sgx, 0, sizeof(struct qla2_sgx));
if (sp) {
cmd = GET_CMD_SP(sp);
- prot_int = cmd->device->sector_size;
+ prot_int = scsi_prot_interval(cmd);
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
@@ -1605,6 +1552,9 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+ return qla28xx_start_scsi_edif(sp);
+
/* Setup device pointers. */
req = vha->req;
rsp = req->rsp;
@@ -1963,6 +1913,9 @@ qla2xxx_start_scsi_mq(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
+ if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+ return qla28xx_start_scsi_edif(sp);
+
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -2466,6 +2419,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
+ logio->control_flags |=
+ cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
+ logio->io_parameter[0] =
+ cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2789,7 +2748,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->s_id[0] = vha->d_id.b.domain;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
- els_iocb->control_flags = 0;
+ if (vha->hw->flags.edif_enabled)
+ els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
+ else
+ els_iocb->control_flags = 0;
els_iocb->tx_byte_count = els_iocb->tx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
@@ -2806,7 +2768,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -3030,7 +2991,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
- "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+ "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
@@ -3073,6 +3034,13 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
+ if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled &&
+ vha->e_dbell.db_flags & EDB_ACTIVE) {
+ struct fc_els_flogi *p = ptr;
+
+ p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
+ }
+
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
@@ -3106,6 +3074,43 @@ done:
return rval;
}
+/* it is assumed the qpair lock is held */
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *els_iocb,
+ struct qla_els_pt_arg *a)
+{
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = QLA_SKIP_HANDLE;
+ els_iocb->nport_handle = a->nport_handle;
+ els_iocb->rx_xchg_address = a->rx_xchg_address;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
+ els_iocb->vp_index = a->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = cpu_to_le16(0);
+ els_iocb->opcode = a->els_opcode;
+
+ els_iocb->d_id[0] = a->did.b.al_pa;
+ els_iocb->d_id[1] = a->did.b.area;
+ els_iocb->d_id[2] = a->did.b.domain;
+ /* For SID the byte order is different than DID */
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
+ els_iocb->s_id[2] = vha->d_id.b.area;
+ els_iocb->s_id[0] = vha->d_id.b.domain;
+
+ els_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+ els_iocb->tx_len = cpu_to_le32(a->tx_len);
+ put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
+
+ els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
+ els_iocb->rx_len = cpu_to_le32(a->rx_len);
+ put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
+}
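
Worth noting the byte-order asymmetry the comment calls out: d_id[] is filled al_pa/area/domain while s_id[] carries domain at index 0. A minimal sketch of both packings from a 24-bit port id (hypothetical helper names):

#include <stdint.h>

/* 24-bit FC port id: domain is the most significant byte. */
struct port_id_sketch {
	uint8_t domain, area, al_pa;
};

/* Destination id in this IOCB layout: index 0 = al_pa .. 2 = domain. */
static void pack_did(uint8_t d[3], struct port_id_sketch id)
{
	d[0] = id.al_pa;
	d[1] = id.area;
	d[2] = id.domain;
}

/* Source id uses a different order: domain at 0, al_pa at 1, area at 2. */
static void pack_sid(uint8_t s[3], struct port_id_sketch id)
{
	s[0] = id.domain;
	s[1] = id.al_pa;
	s[2] = id.area;
}
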
+
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
@@ -3700,6 +3705,16 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.srr_reject_code = 0;
nack->u.isp24.srr_reject_code_expl = 0;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
+ sp->vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
+ "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
+ sp->name, sp->handle, sp->fcport->loop_id,
+ sp->fcport->d_id.b24);
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
}
/*
@@ -3804,6 +3819,10 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_CMD_HST:
qla24xx_els_iocb(sp, pkt);
break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
+ qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
+ ((struct els_entry_24xx *)pkt)->handle = sp->handle;
+ break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_ct_iocb(sp, pkt) :
@@ -3851,6 +3870,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_PRLO_CMD:
qla24xx_prlo_iocb(sp, pkt);
break;
+ case SRB_SA_UPDATE:
+ qla24xx_sa_update_iocb(sp, pkt);
+ break;
+ case SRB_SA_REPLACE:
+ qla24xx_sa_replace_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d9fb093a60a1..ece60267b971 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -170,6 +170,149 @@ qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
}
/**
+ * __qla_consume_iocb - tell the fw the driver has processed or consumed
+ * the head IOCB along with its continuation IOCBs from the provided
+ * response queue.
+ * @vha: host adapter pointer
+ * @pkt: pointer to current packet. On return, this pointer shall move
+ * to the next packet.
+ * @rsp: response queue pointer.
+ *
+ * It is assumed pkt is the head IOCB, not a continuation IOCB.
+ */
+void __qla_consume_iocb(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp)
+{
+ struct rsp_que *rsp_q = *rsp;
+ response_t *new_pkt;
+ uint16_t entry_count_remaining;
+ struct purex_entry_24xx *purex = *pkt;
+
+ entry_count_remaining = purex->entry_count;
+ while (entry_count_remaining > 0) {
+ new_pkt = rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+
+ new_pkt->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ --entry_count_remaining;
+ }
+}
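
The consume path is a plain ring walk: advance ring_index with wraparound and stamp each entry processed. A minimal standalone sketch of the wrap logic (simplified stand-in types, not the driver's structures):

/* Simplified stand-in for the response ring. */
struct ring {
	unsigned int index;	/* next entry to consume */
	unsigned int length;	/* total entries in the ring */
};

/* Advance the ring past 'count' entries, wrapping at 'length' --
 * the core of the consume loop above, minus the signature stamping. */
static void ring_consume(struct ring *r, unsigned int count)
{
	while (count--) {
		r->index++;
		if (r->index == r->length)
			r->index = 0;	/* wrapped: back to the first entry */
	}
}
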
+
+/**
+ * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
+ * and save to provided buffer
+ * @vha: host adapter pointer
+ * @pkt: pointer to the Purex IOCB
+ * @rsp: response queue
+ * @buf: buffer to copy the extracted ELS payload into
+ * @buf_len: buffer length
+ */
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
+{
+ struct purex_entry_24xx *purex = *pkt;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0;
+ uint16_t entry_count_remaining;
+ u16 tpad;
+
+ entry_count_remaining = purex->entry_count;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
+ - PURX_ELS_HEADER_SIZE;
+
+ /*
+ * the payload may not end on a 4-byte boundary. Round up /
+ * pad so there is room to byte-swap before saving the data.
+ */
+ tpad = roundup(total_bytes, 4);
+
+ if (buf_len < tpad) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "%s buffer is too small %d < %d\n",
+ __func__, buf_len, tpad);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return -EIO;
+ }
+
+ pending_bytes = total_bytes = tpad;
+ no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
+ sizeof(purex->els_frame_payload) : pending_bytes;
+
+ memcpy(buf, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ return -EIO;
+ }
+ } while (entry_count_remaining > 0);
+
+ be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
+
+ return 0;
+}
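
Two details in this routine: total_bytes is rounded up to a 4-byte multiple before copying so the trailing word swap never touches unpadded bytes, and the final be32_to_cpu_array() converts the payload from wire (big-endian) order in place. A plain-C sketch of that tail step, with ntohl() standing in for be32_to_cpu():

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>

/* Round n up to the next multiple of 4, as roundup(total_bytes, 4) does. */
static uint32_t pad4(uint32_t n)
{
	return (n + 3u) & ~3u;
}

/* In-place big-endian -> host conversion of the padded payload; the
 * plain-C analogue of be32_to_cpu_array(buf, buf, total_bytes >> 2). */
static void payload_to_host(uint32_t *buf, uint32_t total_bytes)
{
	uint32_t words = pad4(total_bytes) >> 2;

	while (words--) {
		*buf = ntohl(*buf);
		buf++;
	}
}
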
+
+/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
* @dev_id: SCSI driver HA context
@@ -505,7 +648,7 @@ const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
static const char *const link_speeds[] = {
- "1", "2", "?", "4", "8", "16", "32", "10"
+ "1", "2", "?", "4", "8", "16", "32", "64", "10"
};
#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
@@ -1727,6 +1870,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
srb_t *sp;
uint16_t index;
+ if (pkt->handle == QLA_SKIP_HANDLE)
+ return NULL;
+
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
@@ -1971,7 +2117,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
static void
-qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
@@ -1982,18 +2128,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
- int res;
+ int res, logit = 1;
struct srb_iocb *els;
+ uint n;
+ scsi_qla_host_t *vha;
+ struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
- sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
if (!sp)
return;
+ bsg_job = sp->u.bsg_job;
+ vha = sp->vha;
type = NULL;
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+ fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
+ type = "rpt hst";
+ break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
type = "els";
+ {
+ struct els_entry_24xx *els = (void *)pkt;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+ "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd),
+ e->d_id[2], e->d_id[1], e->d_id[0],
+ comp_status, p->e.extra_rx_xchg_address, bsg_job);
+
+ if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
+ if (sp->remap.remapped) {
+ n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ sp->remap.rsp.buf,
+ sp->remap.rsp.len);
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
+ "%s: SG copied %x of %x\n",
+ __func__, n, sp->remap.rsp.len);
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+ "%s: NOT REMAPPED (error)...!!!\n",
+ __func__);
+ }
+ }
+ }
break;
case SRB_CT_CMD:
type = "ct pass-through";
@@ -2023,10 +2209,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
}
- comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le32_to_cpu(ese->error_subcode_1);
- fw_status[2] = le32_to_cpu(ese->error_subcode_2);
-
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
@@ -2040,15 +2222,53 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
res = DID_OK << 16;
els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
ese->total_byte_count));
+
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
+ __func__, e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ logit = 0;
+ }
+
+ } else if (comp_status == CS_PORT_LOGGED_OUT) {
+ els->u.els_plogi.len = 0;
+ res = DID_IMM_RETRY << 16;
+ qlt_schedule_sess_for_deletion(sp->fcport);
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
}
+
+ if (logit) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ } else {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ }
+ }
}
- ql_dbg(ql_dbg_disc, vha, 0x503f,
- "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
- type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
@@ -2107,6 +2327,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
struct srb_iocb *lio;
uint16_t *data;
uint32_t iop[2];
+ int logit = 1;
sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
if (!sp)
@@ -2153,6 +2374,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
+ lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
+ if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
+ fcport->flags |= FCF_FCSP_DEVICE;
+
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
fcport->port_type = FCT_TARGET;
@@ -2180,9 +2405,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_PORTID_USED:
data[0] = MBS_PORT_ID_USED;
data[1] = LSW(iop[1]);
+ logit = 0;
break;
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
+ logit = 0;
break;
case LSC_SCODE_CMD_FAILED:
if (iop[1] == 0x0606) {
@@ -2215,12 +2442,20 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_log(ql_log_warn, sp->vha, 0x5037,
- "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
- type, sp->handle, fcport->d_id.b24, fcport->port_name,
- le16_to_cpu(logio->comp_status),
- le32_to_cpu(logio->io_parameter[0]),
- le32_to_cpu(logio->io_parameter[1]));
+ if (logit)
+ ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
+ "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
+ else
+ ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
+ "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
logio_done:
sp->done(sp, 0);
@@ -2417,6 +2652,15 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
fcport->nvme_flag |= NVME_FLAG_RESETTING;
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+ "Port to be marked lost on fcport=%06x, current "
+ "port state= %s comp_status %x.\n",
+ fcport->d_id.b24, port_state_str[FCS_ONLINE],
+ comp_status);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ }
fallthrough;
case CS_ABORTED:
case CS_PORT_BUSY:
@@ -2969,9 +3213,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ /* Fast path completion. */
+ qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
sp->qpair->cmd_completion_cnt++;
- /* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -3366,6 +3611,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
}
break;
+ case SA_UPDATE_IOCB_TYPE:
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3453,6 +3699,63 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
}
/**
+ * qla_chk_cont_iocb_avail - check that all continuation IOCBs have arrived
+ * before IOCB processing can start.
+ * @vha: host adapter pointer
+ * @rsp: response queue
+ * @pkt: head IOCB indicating how many continuation IOCBs follow
+ * Return: 0 if all IOCBs have arrived, non-zero if they have not.
+ */
+static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
+{
+ int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
+ response_t *end_pkt;
+ int rc = 0;
+ u32 rsp_q_in;
+
+ if (pkt->entry_count == 1)
+ return rc;
+
+ /* ring_index was pre-incremented; set it back to the current pkt */
+ if (rsp->ring_index == 0)
+ start_pkt_ring_index = rsp->length - 1;
+ else
+ start_pkt_ring_index = rsp->ring_index - 1;
+
+ if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
+ end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
+ rsp->length - 1;
+ else
+ end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+
+ end_pkt = rsp->ring + end_pkt_ring_index;
+
+ /* next pkt = end_pkt + 1 */
+ n_ring_index = end_pkt_ring_index + 1;
+ if (n_ring_index >= rsp->length)
+ n_ring_index = 0;
+
+ rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
+ rd_reg_dword(rsp->rsp_q_in);
+
+ /* rsp_q_in has either wrapped or points beyond end_pkt */
+ if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
+ rsp_q_in >= n_ring_index)
+ /* all IOCBs arrived. */
+ rc = 0;
+ else
+ rc = -EIO;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
+ "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
+ rsp_q_in, rc);
+
+ return rc;
+}
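
The index math: the head sits at ring_index minus one (it was pre-incremented), the burst ends entry_count - 1 slots later modulo the ring length, and the whole burst is present once the hardware in-pointer is at or past the entry after the end. A standalone sketch of the wrap arithmetic with a worked example (hypothetical values):

/* Index of the entry following a burst that starts at 'start' and
 * spans 'entry_count' slots in a ring of 'ring_len' entries. */
static unsigned int next_after_burst(unsigned int start,
				     unsigned int entry_count,
				     unsigned int ring_len)
{
	unsigned int end = start + entry_count - 1;	/* last slot of burst */

	if (end >= ring_len)
		end -= ring_len;			/* burst wrapped */

	return (end + 1 == ring_len) ? 0 : end + 1;
}

/* e.g. ring_len = 32, start = 30, entry_count = 4: the burst occupies
 * slots 30, 31, 0, 1, so the next index is 2; the burst is complete
 * once the hardware in-pointer reaches 2 or beyond, modulo wrap. */
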
+
+/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
* @rsp: response queue
@@ -3592,12 +3895,26 @@ process_err:
qla27xx_process_purex_fpin);
break;
+ case ELS_AUTH_ELS:
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "Defer processing ELS opcode %#x...\n",
+ purex_entry->els_frame_payload[3]);
+ return;
+ }
+ qla24xx_auth_els(vha, (void **)&pkt, &rsp);
+ break;
default:
ql_log(ql_log_warn, vha, 0x509c,
"Discarding ELS Request opcode 0x%x\n",
purex_entry->els_frame_payload[3]);
}
break;
+ case SA_UPDATE_IOCB_TYPE:
+ qla28xx_sa_update_iocb_entry(vha, rsp->req,
+ (struct sa_update_28xx *)pkt);
+ break;
+
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -4201,6 +4518,8 @@ skip_msi:
ql_dbg(ql_dbg_init, vha, 0x0125,
"INTa mode: Enabled.\n");
ha->flags.mr_intr_valid = 1;
+ /* Set max_qpairs to 0, as MSI-X and MSI are not enabled */
+ ha->max_qpairs = 0;
}
clear_risc_ints:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9f3ad8aa649c..7811c4952035 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -663,6 +663,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
}
#define NVME_ENABLE_FLAG BIT_3
+#define EDIF_HW_SUPPORT BIT_10
/*
* qla2x00_execute_fw
@@ -739,7 +740,7 @@ again:
mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
- mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
+ mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
} else {
mcp->mb[1] = LSW(risc_addr);
mcp->out_mb |= MBX_1;
@@ -795,6 +796,12 @@ again:
}
}
+ if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
+ ha->flags.edif_hw = 1;
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s: edif HW\n", __func__);
+ }
+
done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
"Done %s.\n", __func__);
@@ -1130,6 +1137,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_attributes_ext[0]);
vha->flags.nvme2_enabled = 1;
}
+
+ if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
+ (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
+ ha->flags.edif_enabled = 1;
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s: edif is enabled\n", __func__);
+ }
}
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -3231,7 +3245,7 @@ qla24xx_abort_command(srb_t *sp)
if (sp->qpair)
req = sp->qpair->req;
else
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NO_QPAIR;
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -3244,7 +3258,7 @@ qla24xx_abort_command(srb_t *sp)
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NOT_FOUND;
}
abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
@@ -4035,6 +4049,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->fc4_type = FS_FC4TYPE_FCP;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
if (wwn_to_u64(vha->port_name) >
wwn_to_u64(fcport->port_name)) {
@@ -4172,6 +4190,16 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.remote_nport_id[1];
fcport->d_id.b.al_pa =
rptid_entry->u.f2.remote_nport_id[0];
+
+ /*
+ * For the case where the remote port sends PRLO, FW
+ * sends up RIDA Format 2 as an indication of session
+ * loss. In other words, the FW state changes from PRLI
+ * complete back to PLOGI complete. Delete the
+ * session and let relogin drive the reconnect.
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qlt_schedule_sess_for_deletion(fcport);
}
}
}
@@ -4946,7 +4974,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
return rval;
}
-#define PUREX_CMD_COUNT 2
+#define PUREX_CMD_COUNT 4
int
qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
{
@@ -4954,6 +4982,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
uint8_t *els_cmd_map;
+ uint8_t active_cnt = 0;
dma_addr_t els_cmd_map_dma;
uint8_t cmd_opcode[PUREX_CMD_COUNT];
uint8_t i, index, purex_bit;
@@ -4975,10 +5004,20 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
}
/* List of Purex ELS */
- cmd_opcode[0] = ELS_FPIN;
- cmd_opcode[1] = ELS_RDP;
+ if (ql2xrdpenable) {
+ cmd_opcode[active_cnt] = ELS_RDP;
+ active_cnt++;
+ }
+ if (ha->flags.scm_supported_f) {
+ cmd_opcode[active_cnt] = ELS_FPIN;
+ active_cnt++;
+ }
+ if (ha->flags.edif_enabled) {
+ cmd_opcode[active_cnt] = ELS_AUTH_ELS;
+ active_cnt++;
+ }
- for (i = 0; i < PUREX_CMD_COUNT; i++) {
+ for (i = 0; i < active_cnt; i++) {
index = cmd_opcode[i] / 8;
purex_bit = cmd_opcode[i] % 8;
els_cmd_map[index] |= 1 << purex_bit;
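
The ELS command map is a 256-bit bitmap, one bit per opcode: byte index is opcode / 8 and bit position is opcode % 8. A minimal sketch, assuming the FC-SP AUTH ELS opcode 0x90 for the worked value:

#include <stdint.h>

#define ELS_MAP_BYTES 32	/* 256 possible opcodes, one bit each */

/* Enable PUREX delivery for one ELS opcode:
 * byte index = opcode / 8, bit position = opcode % 8. */
static void els_map_set(uint8_t map[ELS_MAP_BYTES], uint8_t opcode)
{
	map[opcode / 8] |= 1u << (opcode % 8);
}

/* e.g. opcode 0x90 (AUTH ELS per FC-SP): 0x90 / 8 = 18, 0x90 % 8 = 0,
 * so map[18] |= 0x01. */
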
@@ -6588,6 +6627,12 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "%8phC SVC Param w3 %02x%02x",
+ fcport->port_name,
+ pd->prli_svc_param_word_3[1],
+ pd->prli_svc_param_word_3[0]);
+
if (NVME_TARGET(vha->hw, fcport)) {
fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c7caf322f445..1c024055f8c5 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -65,7 +65,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
- u8 i;
+ u32 i, bailout;
mutex_lock(&ha->vport_lock);
/*
@@ -75,21 +75,29 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- for (i = 0; i < 10; i++) {
- if (wait_event_timeout(vha->vref_waitq,
- !atomic_read(&vha->vref_count), HZ) > 0)
+ bailout = 0;
+ for (i = 0; i < 500; i++) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count) == 0) {
+ list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
+ bailout = 1;
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (bailout)
break;
+ else
+ msleep(20);
}
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- if (atomic_read(&vha->vref_count)) {
- ql_dbg(ql_dbg_vport, vha, 0xfffa,
- "vha->vref_count=%u timeout\n", vha->vref_count.counter);
- vha->vref_count = (atomic_t)ATOMIC_INIT(0);
+ if (!bailout) {
+ ql_log(ql_log_info, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
- list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
ha->num_vhosts--;
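
The rework above trades wait_event_timeout() for an explicit poll: the refcount is tested and the vport unlinked in the same lock hold, so vp_list walkers never observe a half-removed entry. A minimal stand-alone sketch of the poll-with-bailout shape (locking reduced to comments; all names illustrative):

#define _POSIX_C_SOURCE 199309L
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int vref_count;	/* toy stand-in for vha->vref_count */

static void msleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

int main(void)
{
	int i, bailout = 0;

	for (i = 0; i < 500; i++) {	/* ~10 s total at 20 ms per pass */
		/* spin_lock_irqsave(...); */
		if (atomic_load(&vref_count) == 0) {
			/* list_del() + vp-map reset happen here, under the lock */
			bailout = 1;
		}
		/* spin_unlock_irqrestore(...); */
		if (bailout)
			break;
		msleep_ms(20);
	}
	printf(bailout ? "clean removal\n" : "timed out, forcing removal\n");
	return 0;
}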
@@ -158,6 +166,10 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
+ if (vha->hw->flags.edif_enabled)
+ /* delete sessions and flush sa_indexes */
+ qla2x00_wait_for_sess_deletion(vha);
+
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -166,7 +178,8 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
- qla2x00_mark_all_devices_lost(vha);
+ if (!vha->hw->flags.edif_enabled)
+ qla2x00_wait_for_sess_deletion(vha);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -257,13 +270,13 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- scsi_qla_host_t *vha;
+ scsi_qla_host_t *vha, *tvp;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vha, &ha->vp_list, list) {
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
if (vha->vp_idx) {
if (test_bit(VPORT_DELETE, &vha->dpc_flags))
continue;
@@ -416,7 +429,7 @@ void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *vp;
+ scsi_qla_host_t *vp, *tvp;
unsigned long flags = 0;
if (vha->vp_idx)
@@ -430,7 +443,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
return;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 3e5c70a1d969..1c5da2dbd6f9 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -91,8 +91,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
struct qla_qpair *qpair;
- if (!qidx)
- qidx++;
+ /* Map admin queue and 1st IO queue to index 0 */
+ if (qidx)
+ qidx--;
vha = (struct scsi_qla_host *)lport->private;
ha = vha->hw;
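
The off-by-one here is deliberate: FC-NVMe presents qidx 0 for the admin queue and 1..N for IO queues, and the driver folds the admin queue and the first IO queue onto hardware qpair 0. A small sketch of the mapping (illustrative only):

#include <stdio.h>

static unsigned int qidx_to_qpair(unsigned int qidx)
{
	return qidx ? qidx - 1 : 0;	/* admin (0) and IO queue 1 share qpair 0 */
}

int main(void)
{
	unsigned int q;

	for (q = 0; q <= 3; q++)
		printf("nvme qidx %u -> qpair %u\n", q, qidx_to_qpair(q));
	return 0;
}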
@@ -108,19 +109,24 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return -EINVAL;
}
- if (ha->queue_pair_map[qidx]) {
- *handle = ha->queue_pair_map[qidx];
- ql_log(ql_log_info, vha, 0x2121,
- "Returning existing qpair of %p for idx=%x\n",
- *handle, qidx);
- return 0;
- }
+ /* Use base qpair if max_qpairs is 0 */
+ if (!ha->max_qpairs) {
+ qpair = ha->base_qpair;
+ } else {
+ if (ha->queue_pair_map[qidx]) {
+ *handle = ha->queue_pair_map[qidx];
+ ql_log(ql_log_info, vha, 0x2121,
+ "Returning existing qpair of %p for idx=%x\n",
+ *handle, qidx);
+ return 0;
+ }
- qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
- if (qpair == NULL) {
- ql_log(ql_log_warn, vha, 0x2122,
- "Failed to allocate qpair\n");
- return -EINVAL;
+ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+ if (!qpair) {
+ ql_log(ql_log_warn, vha, 0x2122,
+ "Failed to allocate qpair\n");
+ return -EINVAL;
+ }
}
*handle = qpair;
@@ -221,13 +227,13 @@ static void qla_nvme_abort_work(struct work_struct *work)
srb_t *sp = priv->sp;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
- int rval;
+ int rval, abts_done_called = 1;
ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
- "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
- __func__, sp, sp->handle, fcport, fcport->deleted);
+ "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
+ __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
- if (!ha->flags.fw_started || fcport->deleted)
+ if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
goto out;
if (ha->flags.host_shutting_down) {
@@ -246,11 +252,19 @@ static void qla_nvme_abort_work(struct work_struct *work)
sp, sp->handle, fcport, rval);
/*
+ * If async TMF is enabled, the abort callback is called only on
+ * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
+ */
+ if (ql2xasynctmfenable &&
+ rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
+ abts_done_called = 0;
+
+ /*
* Return before decreasing the kref so that I/O requests
* are waited on until the ABTS completes. The kref is
* decreased in qla24xx_abort_sp_done().
*/
- if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
+ if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
return;
out:
/* kref_get was done before the work was scheduled. */
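
The abts_done_called guard above encodes a reference-ownership handoff: the extra kref taken before queuing the work is dropped by the abort completion callback when that callback is guaranteed to run, and by this function otherwise. A toy stand-alone model of the handoff (names illustrative):

#include <stdio.h>

static int refs = 2;	/* 1 owner + 1 taken before the work was queued */

static void put_ref(void)
{
	if (--refs == 0)
		printf("object freed\n");
}

static void abort_work(int callback_will_run)
{
	if (callback_will_run)
		return;		/* completion callback inherits the reference */
	put_ref();		/* no callback coming: drop it ourselves */
}

int main(void)
{
	abort_work(0);		/* drops the queued-work reference */
	put_ref();		/* owner's final put frees the object */
	return 0;
}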
@@ -463,6 +477,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = 0;
}
+
+ if (sp->fcport->edif.enable && fd->io_dir != 0)
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+
/* Set BIT_13 of control flags for Async event */
if (vha->flags.nvme2_enabled &&
cmd->sqe.common.opcode == nvme_admin_async_event) {
@@ -727,18 +745,9 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
WARN_ON(vha->nvme_local_port);
- if (ha->max_req_queues < 3) {
- if (!ha->flags.max_req_queue_warned)
- ql_log(ql_log_info, vha, 0x2120,
- "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
- __func__, ha->max_req_queues);
- ha->flags.max_req_queue_warned = 1;
- return ret;
- }
-
qla_nvme_fc_transport.max_hw_queues =
min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
- (uint8_t)(ha->max_req_queues - 2));
+ (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
@@ -803,14 +812,14 @@ void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *ori
case CS_PORT_LOGGED_OUT:
/* BA_RJT was received for the ABTS */
case CS_PORT_CONFIG_CHG:
- ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
+ ql_dbg(ql_dbg_async, vha, 0xf09d,
"Abort I/O IOCB completed with error, comp_status=%x\n",
comp_status);
break;
/* BA_RJT was received for the ABTS */
case CS_REJECT_RECEIVED:
- ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+ ql_dbg(ql_dbg_async, vha, 0xf09e,
"BA_RJT was received for the ABTS rjt_vendorUnique = %u",
abt->fw.ba_rjt_vendorUnique);
ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
@@ -819,18 +828,18 @@ void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *ori
break;
case CS_COMPLETE:
- ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
"IOCB request is completed successfully comp_status=%x\n",
comp_status);
break;
case CS_IOCB_ERROR:
- ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
+ ql_dbg(ql_dbg_async, vha, 0xf0a0,
"IOCB request is failed, comp_status=%x\n", comp_status);
break;
default:
- ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
+ ql_dbg(ql_dbg_async, vha, 0xf0a1,
"Invalid Abort IO IOCB Completion Status %x\n",
comp_status);
break;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 615e44af1ca6..11aad97dfca8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2166,7 +2166,6 @@ qla82xx_poll(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
- int status = 0;
uint32_t stat;
uint32_t host_int = 0;
uint16_t mb[8];
@@ -2195,7 +2194,6 @@ qla82xx_poll(int irq, void *dev_id)
case 0x10:
case 0x11:
qla82xx_mbx_completion(vha, MSW(stat));
- status |= MBX_INTERRUPT;
break;
case 0x12:
mb[0] = MSW(stat);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index cedd558f65eb..d2e40aaba734 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
+#include <linux/crash_dump.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -53,6 +54,11 @@ static struct kmem_cache *ctx_cachep;
*/
uint ql_errlev = 0x8001;
+int ql2xsecenable;
+module_param(ql2xsecenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xsecenable,
+ "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
+
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
@@ -849,7 +855,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
uint16_t hwq;
struct qla_qpair *qpair = NULL;
- tag = blk_mq_unique_tag(cmd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
@@ -1120,12 +1126,28 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
int res;
+ /* Return 0 to keep sleeping, non-zero to wake. */
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00ec,
"tgt %p, fcport_count=%d\n",
vha, vha->fcport_count);
res = (vha->fcport_count == 0);
+ if (res) {
+ struct fc_port *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->deleted != QLA_SESS_DELETED) {
+ /* session(s) may not be fully logged in
+ * (ie fcport_count=0), but session
+ * deletion thread(s) may be inflight.
+ */
+
+ res = 0;
+ break;
+ }
+ }
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
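
The extra scan makes the wake-up predicate two-part: the port count must be zero and every fcport must have finished its deletion work. A stand-alone sketch of the predicate (the QLA_SESS_DELETED value here is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { QLA_SESS_DELETED = 2 };		/* fully torn-down session */

struct fcport { int deleted; };

/* A port can have fcport_count == 0 while its teardown work is still
 * running, so wake only when the count is zero AND no worker remains. */
static bool all_sessions_gone(int fcport_count,
			      const struct fcport *ports, int n)
{
	int i;

	if (fcport_count)
		return false;
	for (i = 0; i < n; i++)
		if (ports[i].deleted != QLA_SESS_DELETED)
			return false;
	return true;
}

int main(void)
{
	struct fcport p[2] = { { QLA_SESS_DELETED }, { 0 } };

	printf("%d\n", all_sessions_gone(0, p, 2));	/* 0: worker in flight */
	p[1].deleted = QLA_SESS_DELETED;
	printf("%d\n", all_sessions_gone(0, p, 2));	/* 1: safe to wake */
	return 0;
}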
@@ -1367,18 +1389,27 @@ static char *reset_errors[] = {
};
static int
-__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
- struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
+qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct scsi_device *sdev = cmd->device;
+ scsi_qla_host_t *vha = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+ struct qla_hw_data *ha = vha->hw;
int err;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
if (!fcport) {
return FAILED;
}
- err = fc_block_scsi_eh(cmd);
+ err = fc_block_rport(rport);
if (err != 0)
return err;
@@ -1386,8 +1417,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
return SUCCESS;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
- cmd->device->id, cmd->device->lun, cmd);
+ "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
+ sdev->id, sdev->lun, cmd);
err = 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -1396,67 +1427,100 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed;
}
err = 2;
- if (do_reset(fcport, cmd->device->lun, 1)
+ if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+ sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
- vha->host_no, cmd->device->id, cmd->device->lun, cmd);
+ "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
+ vha->host_no, sdev->id, sdev->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
- reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+ reset_errors[err], vha->host_no, sdev->id, sdev->lun,
cmd);
vha->reset_cmd_err_cnt++;
return FAILED;
}
static int
-qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct scsi_device *sdev = cmd->device;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ int err;
if (qla2x00_isp_reg_stat(ha)) {
- ql_log(ql_log_info, vha, 0x803e,
+ ql_log(ql_log_info, vha, 0x803f,
"PCI/Register disconnect, exiting.\n");
qla_pci_set_eeh_busy(vha);
return FAILED;
}
- return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
- ha->isp_ops->lun_reset);
-}
+ if (!fcport) {
+ return FAILED;
+ }
-static int
-qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
-{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- struct qla_hw_data *ha = vha->hw;
+ err = fc_block_rport(rport);
+ if (err != 0)
+ return err;
- if (qla2x00_isp_reg_stat(ha)) {
- ql_log(ql_log_info, vha, 0x803f,
- "PCI/Register disconnect, exiting.\n");
- qla_pci_set_eeh_busy(vha);
- return FAILED;
+ if (fcport->deleted)
+ return SUCCESS;
+
+ ql_log(ql_log_info, vha, 0x8009,
+ "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
+ sdev->id, cmd);
+
+ err = 0;
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
}
+ err = 2;
+ if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "target_reset failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+ err = 3;
+ if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+ 0, WAIT_TARGET) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for pending cmds failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0x800e,
+ "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
+ vha->host_no, sdev->id, cmd);
+
+ return SUCCESS;
- return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
- ha->isp_ops->target_reset);
+eh_reset_failed:
+ ql_log(ql_log_info, vha, 0x800f,
+ "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
+ vha->reset_cmd_err_cnt++;
+ return FAILED;
}
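
Although the shared helper is gone, both handlers still follow the same three-step skeleton __qla2xxx_eh_generic_reset() used to factor out: wait for the HBA, fire the reset, drain pending commands. A stand-alone sketch of that skeleton with the per-handler pieces passed as callbacks (all names and return codes illustrative):

#include <stdio.h>

enum { QLA_SUCCESS = 0, SUCCESS = 0x2002, FAILED = 0x2003 };

struct target { int id; };

static int generic_reset(struct target *t,
			 int (*wait_online)(void),
			 int (*do_reset)(struct target *),
			 int (*drain_pending)(struct target *))
{
	if (wait_online() != QLA_SUCCESS)
		return FAILED;		/* step 1: HBA must be up */
	if (do_reset(t) != QLA_SUCCESS)
		return FAILED;		/* step 2: issue LUN/target reset */
	if (drain_pending(t) != QLA_SUCCESS)
		return FAILED;		/* step 3: wait out in-flight commands */
	return SUCCESS;
}

static int ok(void) { return QLA_SUCCESS; }
static int ok_t(struct target *t) { (void)t; return QLA_SUCCESS; }

int main(void)
{
	struct target t = { 1 };

	printf("reset: %s\n",
	       generic_reset(&t, ok, ok_t, ok_t) == SUCCESS ?
	       "SUCCESS" : "FAILED");
	return 0;
}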
/**************************************************************************
@@ -1478,7 +1542,6 @@ static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
unsigned int id;
uint64_t lun;
@@ -1494,15 +1557,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport) {
- return ret;
- }
-
- ret = fc_block_scsi_eh(cmd);
- if (ret != 0)
- return ret;
- ret = FAILED;
-
if (qla2x00_chip_is_down(vha))
return ret;
@@ -1742,7 +1796,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && blk_mq_request_started(cmd->request))
+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
sp->done(sp, res);
} else {
sp->done(sp, res);
@@ -2818,6 +2872,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
+ if (is_kdump_kernel()) {
+ ql2xmqsupport = 0;
+ ql2xallocfwdump = 0;
+ }
+
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
@@ -2835,6 +2894,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
+ spin_lock_init(&ha->sadb_lock);
+ INIT_LIST_HEAD(&ha->sadb_tx_index_list);
+ INIT_LIST_HEAD(&ha->sadb_rx_index_list);
+
+ spin_lock_init(&ha->sadb_fp_lock);
+
+ if (qla_edif_sadb_build_free_pool(ha)) {
+ kfree(ha);
+ goto disable_device;
+ }
+
atomic_set(&ha->nvme_active_aen_cnt, 0);
/* Clear our data area */
@@ -3033,8 +3103,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- req_length = REQUEST_ENTRY_CNT_24XX;
- rsp_length = RESPONSE_ENTRY_CNT_2300;
+ req_length = REQUEST_ENTRY_CNT_83XX;
+ rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -3460,6 +3530,8 @@ skip_dpc:
return 0;
probe_failed:
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3762,6 +3834,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
vfree(base_vha->scan.l);
@@ -3795,7 +3869,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
- qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -3867,6 +3940,9 @@ qla2x00_free_device(scsi_qla_host_t *vha)
qla82xx_md_free(vha);
+ qla_edif_sadb_release_free_pool(ha);
+ qla_edif_sadb_release(ha);
+
qla2x00_free_queues(ha);
}
@@ -3919,6 +3995,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport);
}
+
/*
* We may need to retry the login, so don't change the state of the
* port but do the retries.
@@ -3941,6 +4018,16 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
"Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) &&
+ fcport->port_type == FCT_TARGET &&
+ !qla2x00_reset_active(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x211a,
+ "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
+ fcport->flags, fcport->port_type,
+ fcport->d_id.b24, fcport->port_name);
+ continue;
+ }
fcport->scan_state = 0;
qlt_schedule_sess_for_deletion(fcport);
}
@@ -3972,15 +4059,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
struct req_que **req, struct rsp_que **rsp)
{
char name[16];
+ int rc;
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
goto fail;
- if (qlt_mem_alloc(ha) < 0)
+ rc = btree_init32(&ha->host_map);
+ if (rc)
goto fail_free_init_cb;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_btree;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
@@ -3990,7 +4082,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
- if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
/* Allocate cache for CT6 Ctx. */
if (!ctx_cachep) {
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -4024,7 +4116,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
- if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+ if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
@@ -4264,8 +4356,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
goto fail_flt_buffer;
}
+ /* allocate the purex dma pool */
+ ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ MAX_PAYLOAD, 8, 0);
+
+ if (!ha->purex_dma_pool) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "Unable to allocate purex_dma_pool.\n");
+ goto fail_flt;
+ }
+
+ ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
+ ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
+ ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
+
+ if (!ha->elsrej.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for els reject cmd.\n");
+ goto fail_elsrej;
+ }
+ ha->elsrej.c->er_cmd = ELS_LS_RJT;
+ ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
+ ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
return 0;
+fail_elsrej:
+ dma_pool_destroy(ha->purex_dma_pool);
+fail_flt:
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ ha->flt, ha->flt_dma);
+
fail_flt_buffer:
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
ha->sfp_data, ha->sfp_data_dma);
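
The unwind labels added above follow the standard kernel goto-ladder: each allocation gets a label that releases everything acquired before it, so a mid-sequence failure frees resources in exact reverse order. A minimal stand-alone model of the pattern:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *pool = NULL, *flt = NULL, *elsrej = NULL;

	pool = malloc(64);
	if (!pool)
		goto fail;
	flt = malloc(64);
	if (!flt)
		goto fail_pool;
	elsrej = malloc(64);
	if (!elsrej)
		goto fail_flt;

	/* success: caller owns all three (freed here only for the demo) */
	free(elsrej);
	free(flt);
	free(pool);
	return 0;

fail_flt:
	free(flt);
fail_pool:
	free(pool);
fail:
	return -1;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}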
@@ -4356,6 +4476,8 @@ fail_free_gid_list:
ha->gid_list_dma = 0;
fail_free_tgt_mem:
qlt_mem_free(ha);
+fail_free_btree:
+ btree_destroy32(&ha->host_map);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@@ -4772,10 +4894,21 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->dif_bundl_pool = NULL;
qlt_mem_free(ha);
+ qla_remove_hostmap(ha);
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
+
+ dma_pool_destroy(ha->purex_dma_pool);
+ ha->purex_dma_pool = NULL;
+
+ if (ha->elsrej.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
+ ha->elsrej.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
@@ -4837,6 +4970,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
+ qla_enode_init(vha);
+ qla_edb_init(vha);
+
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -5080,6 +5216,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
WWN_SIZE);
fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (NVME_PRIORITY(vha->hw, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+
if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
fcport->dm_login_expire = jiffies +
QLA_N2N_WAIT_TIME * HZ;
@@ -5327,6 +5468,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
e->u.fcport.fcport, false);
break;
+ case QLA_EVT_SA_REPLACE:
+ qla24xx_issue_sa_replace_iocb(vha, e);
+ break;
}
if (rc == EAGAIN) {
@@ -5376,6 +5520,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (atomic_read(&fcport->state) != FCS_ONLINE &&
fcport->login_retry) {
if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
fcport->disc_state == DSC_LOGIN_COMPLETE)
continue;
@@ -7234,6 +7379,10 @@ qla2x00_timer(struct timer_list *t)
}
}
+ /* check if edif running */
+ if (vha->hw->flags.edif_enabled)
+ qla_edif_timer(vha);
+
/* Process any deferred work. */
if (!list_empty(&vha->work_list)) {
unsigned long flags;
@@ -7430,7 +7579,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
struct qla_qpair *qpair = NULL;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
fc_port_t *fcport;
int i;
unsigned long flags;
@@ -7461,7 +7610,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_mark_all_devices_lost(vp);
@@ -7475,7 +7624,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
list_for_each_entry(fcport, &vp->vp_fcports, list)
@@ -7887,7 +8036,7 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
- BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 060c89237777..a0aeba69513d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2936,7 +2936,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
liter += dburst - 1;
faddr += dburst - 1;
dwptr += dburst - 1;
- continue;
}
write_protect:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index eb47140a899f..b3478ed9b12e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -184,8 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
return QLA_SUCCESS;
}
-static inline
-struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
be_id_t d_id)
{
struct scsi_qla_host *host;
@@ -198,7 +197,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
key = be_to_port_id(d_id).b24;
- host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ host = btree_lookup32(&vha->hw->host_map, key);
if (!host)
ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
@@ -299,7 +298,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
goto abort;
}
- host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
@@ -348,7 +347,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
{
- struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
atio->u.isp24.fcp_hdr.d_id);
if (unlikely(NULL == host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03e,
@@ -577,6 +576,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
sp->fcport->logout_on_delete = 1;
sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
sp->fcport->send_els_logo = 0;
+
+ if (sp->fcport->flags & FCF_FCSP_DEVICE) {
+ ql_dbg(ql_dbg_edif, vha, 0x20ef,
+ "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
+ sp->fcport->port_name);
+ qla2x00_set_fcport_disc_state(sp->fcport,
+ DSC_LOGIN_AUTH_PEND);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sp->fcport->d_id.b24);
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
+ 0, sp->fcport);
+ }
break;
case SRB_NACK_PRLI:
@@ -624,6 +635,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
case SRB_NACK_PLOGI:
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
c = "PLOGI";
+ if (vha->hw->flags.edif_enabled &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
+ fcport->flags |= FCF_FCSP_DEVICE;
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
@@ -693,7 +707,12 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
void qla24xx_delete_sess_fn(struct work_struct *work)
{
fc_port_t *fcport = container_of(work, struct fc_port, del_work);
- struct qla_hw_data *ha = fcport->vha->hw;
+ struct qla_hw_data *ha = NULL;
+
+ if (!fcport || !fcport->vha || !fcport->vha->hw)
+ return;
+
+ ha = fcport->vha->hw;
if (fcport->se_sess) {
ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -917,6 +936,11 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
qlt_port_logo_t *tmp;
int res;
+ if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+ res = 0;
+ goto out;
+ }
+
mutex_lock(&vha->vha_tgt.tgt_mutex);
list_for_each_entry(tmp, &vha->logo_list, list) {
@@ -937,6 +961,7 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
list_del(&logo->list);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+out:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
"Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
@@ -965,6 +990,21 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
+ if (ha->flags.edif_enabled &&
+ (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+ qla_edif_sess_down(vha, sess);
+ }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -972,6 +1012,7 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ INIT_LIST_HEAD(&logo.list);
if (!own)
qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
@@ -982,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
if (!own ||
(own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+ sess->logout_completed = 0;
rc = qla2x00_post_async_logout_work(vha, sess,
NULL);
if (rc != QLA_SUCCESS)
@@ -1278,8 +1320,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_log_warn, sess->vha, 0xe001,
- "Scheduling sess %p for deletion %8phC\n",
- sess, sess->port_name);
+ "Scheduling sess %p for deletion %8phC fc4_type %x\n",
+ sess, sess->port_name, sess->fc4_type);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
@@ -1720,6 +1762,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.srr_reject_code_expl = srr_explan;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+ /* TODO: qualify this with the EDIF enable flag */
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
+
ql_dbg(ql_dbg_tgt, vha, 0xe005,
"qla_target(%d): Sending 24xx Notify Ack %d\n",
vha->vp_idx, nack->u.isp24.status);
@@ -2571,6 +2619,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
struct ctio7_to_24xx *pkt;
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t temp;
+ struct qla_tgt_cmd *cmd = prm->cmd;
pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
prm->pkt = pkt;
@@ -2603,6 +2652,15 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+ if (cmd->edif) {
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
+ if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
+
+ pkt->u.status0.edif_flags |= EF_EN_EDIF;
+ }
+
return 0;
}
@@ -3293,8 +3351,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (xmit_type & QLA_TGT_XMIT_STATUS) {
pkt->u.status0.scsi_status =
cpu_to_le16(prm.rq_result);
- pkt->u.status0.residual =
- cpu_to_le32(prm.residual);
+ if (!cmd->edif)
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+
pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(cmd, 0)) {
@@ -3941,6 +4001,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (cmd == NULL)
return;
+ if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
+ cmd->sess) {
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
+ (struct ctio7_from_24xx *)ctio);
+ }
+
se_cmd = &cmd->se_cmd;
cmd->cmd_sent_to_fw = 0;
@@ -4011,6 +4077,16 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
+
+ case CTIO_FAST_AUTH_ERR:
+ case CTIO_FAST_INCOMP_PAD_LEN:
+ case CTIO_FAST_INVALID_REQ:
+ case CTIO_FAST_SPI_ERR:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
"qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
@@ -4312,6 +4388,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_assign_qpair(vha, cmd);
cmd->reset_count = vha->hw->base_qpair->chip_reset;
cmd->vp_idx = vha->vp_idx;
+ cmd->edif = sess->edif.enable;
return cmd;
}
@@ -4727,6 +4804,34 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (vha->hw->flags.edif_enabled &&
+ !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+ iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (vha->hw->flags.edif_enabled) {
+ if (!(vha->e_dbell.db_flags & EDB_ACTIVE)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+ }
+
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
@@ -4792,6 +4897,20 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
+ sess->loop_id = loop_id;
+
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
+ /* remote port has assigned Port ID */
+ if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
+ vha->d_id = sess->d_id;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - send port online\n",
+ __func__, sess->port_name);
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sess->d_id.b24);
+ }
if (iocb->u.isp24.status_subcode == ELS_PRLI) {
sess->fw_login_state = DSC_LS_PRLI_PEND;
@@ -4904,6 +5023,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess = qla2x00_find_fcport_by_wwpn(vha,
iocb->u.isp24.port_name, 1);
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
@@ -4952,6 +5081,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
bool delete = false;
int sec;
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorize prli\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -5141,7 +5280,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
/*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * ha->hardware_lock supposed to be held on entry.
+ * Might drop it, then reacquire.
*/
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
@@ -6444,15 +6584,15 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
-void qlt_remove_target_resources(struct qla_hw_data *ha)
+void qla_remove_hostmap(struct qla_hw_data *ha)
{
struct scsi_qla_host *node;
u32 key = 0;
- btree_for_each_safe32(&ha->tgt.host_map, key, node)
- btree_remove32(&ha->tgt.host_map, key);
+ btree_for_each_safe32(&ha->host_map, key, node)
+ btree_remove32(&ha->host_map, key);
- btree_destroy32(&ha->tgt.host_map);
+ btree_destroy32(&ha->host_map);
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
@@ -7080,8 +7220,7 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
- int rc;
-
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -7094,7 +7233,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
}
- mutex_init(&base_vha->vha_tgt.tgt_mutex);
mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
INIT_LIST_HEAD(&base_vha->unknown_atio_list);
@@ -7103,11 +7241,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_clear_mode(base_vha);
- rc = btree_init32(&ha->tgt.host_map);
- if (rc)
- ql_log(ql_log_info, base_vha, 0xd03d,
- "Unable to initialize ha->host_map btree\n");
-
qlt_update_vp_map(base_vha, SET_VP_IDX);
}
@@ -7228,21 +7361,20 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
u32 key;
int rc;
- if (!QLA_TGT_MODE_ENABLED())
- return;
-
key = vha->d_id.b24;
switch (cmd) {
case SET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (!slot) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
"Save vha in host_map %p %06x\n", vha, key);
- rc = btree_insert32(&vha->hw->tgt.host_map,
+ rc = btree_insert32(&vha->hw->host_map,
key, vha, GFP_ATOMIC);
if (rc)
ql_log(ql_log_info, vha, 0xd03e,
@@ -7252,17 +7384,19 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
"replace existing vha in host_map %p %06x\n", vha, key);
- btree_update32(&vha->hw->tgt.host_map, key, vha);
+ btree_update32(&vha->hw->host_map, key, vha);
break;
case RESET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
"clear vha in host_map %p %06x\n", vha, key);
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (slot)
- btree_remove32(&vha->hw->tgt.host_map, key);
+ btree_remove32(&vha->hw->host_map, key);
vha->d_id.b24 = 0;
break;
}
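
The host map is the kernel's generic 32-bit B+tree (lib/btree); lookup/insert/update/remove above are essentially its whole API surface for this use. A minimal kernel-style sketch of the key-to-pointer map, using only the calls visible in this patch (error handling trimmed; sketch only):

#include <linux/btree.h>
#include <linux/errno.h>
#include <linux/slab.h>

static struct btree_head32 host_map;

/* Map a 24-bit FC port ID to a host pointer, as ha->host_map now does. */
static int host_map_demo(void *vha, u32 port_id)
{
	int rc = btree_init32(&host_map);

	if (rc)
		return rc;

	rc = btree_insert32(&host_map, port_id, vha, GFP_ATOMIC);
	if (!rc && btree_lookup32(&host_map, port_id) != vha)
		rc = -ENOENT;
	btree_remove32(&host_map, port_id);
	btree_destroy32(&host_map);
	return rc;
}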
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 01620f3eab39..156b950ca7e7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -176,6 +176,7 @@ struct nack_to_isp {
uint8_t reserved[2];
__le16 ox_id;
} __packed;
+#define NOTIFY_ACK_FLAGS_FCSP BIT_5
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
@@ -238,6 +239,10 @@ struct ctio_to_2xxx {
#define CTIO_PORT_LOGGED_OUT 0x29
#define CTIO_PORT_CONF_CHANGED 0x2A
#define CTIO_SRR_RECEIVED 0x45
+#define CTIO_FAST_AUTH_ERR 0x63
+#define CTIO_FAST_INCOMP_PAD_LEN 0x65
+#define CTIO_FAST_INVALID_REQ 0x66
+#define CTIO_FAST_SPI_ERR 0x67
#endif
#ifndef CTIO_RET_TYPE
@@ -408,7 +413,16 @@ struct ctio7_to_24xx {
struct {
__le16 reserved1;
__le16 flags;
- __le32 residual;
+ union {
+ __le32 residual;
+ struct {
+ uint8_t rsvd1;
+ uint8_t edif_flags;
+#define EF_EN_EDIF BIT_0
+#define EF_NEW_SA BIT_1
+ uint16_t rsvd2;
+ };
+ };
__le16 ox_id;
__le16 scsi_status;
__le32 relative_offset;
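
The union added above overlays the 32-bit residual with per-byte EDIF flags without widening the IOCB; that is also why qlt_xmit_response() now skips writing the residual for EDIF commands. A stand-alone model of the overlay (field names mirror the patch; host-endian here for illustration only, the real IOCB field is __le32):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EF_EN_EDIF (1u << 0)
#define EF_NEW_SA  (1u << 1)

union status_word {
	uint32_t residual;
	struct {
		uint8_t  rsvd1;
		uint8_t  edif_flags;
		uint16_t rsvd2;
	};
};

int main(void)
{
	union status_word w = { .residual = 0 };

	static_assert(sizeof(w) == 4, "overlay must not widen the IOCB");
	w.edif_flags |= EF_EN_EDIF;
	/* On a little-endian host the flag byte lands in bits 8..15. */
	printf("residual view = 0x%08x\n", w.residual);
	return 0;
}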
@@ -446,7 +460,7 @@ struct ctio7_from_24xx {
uint8_t vp_index;
uint8_t reserved1[5];
__le32 exchange_address;
- __le16 reserved2;
+ __le16 edif_sa_index;
__le16 flags;
__le32 residual;
__le16 ox_id;
@@ -875,6 +889,7 @@ struct qla_tgt_cmd {
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
+ unsigned int edif:1;
/*
* This variable may be set from outside the LIO and I/O completion
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index da11829fa12d..055040cbef9b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.00.106-k"
+#define QLA2XXX_VERSION "10.02.06.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 106
+#define QLA_DRIVER_PATCH_VER 6
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index f786ac2f5548..301bc09c8365 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -119,8 +119,8 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
* the interrupt_handler to think there are responses to be
* processed when there aren't.
*/
- ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
- ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+ ha->shadow_regs->req_q_out = cpu_to_le32(0);
+ ha->shadow_regs->rsp_q_in = cpu_to_le32(0);
wmb();
writel(0, &ha->reg->req_q_in);
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index cbd1e6ffcd67..28eab07935ba 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -160,7 +160,7 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
- cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+ cmd_entry->ttlByteCnt = cpu_to_le32(0);
return;
}
@@ -288,7 +288,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- index = (uint32_t)cmd->request->tag;
+ index = scsi_cmd_to_rq(cmd)->tag;
/*
* Check to see if adapter is online before placing request on
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 187d78aa4f67..cd71074f3abe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -645,8 +645,8 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH);
init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
@@ -656,20 +656,20 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Set up required options. */
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_SESSION_MODE |
- FWOPT_INITIATOR_MODE);
+ cpu_to_le16(FWOPT_SESSION_MODE |
+ FWOPT_INITIATOR_MODE);
if (is_qla80XX(ha))
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+ cpu_to_le16(FWOPT_ENABLE_CRBDB);
- init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE);
init_fw_cb->add_fw_options = 0;
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+ cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+ cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
@@ -1613,7 +1613,7 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
@@ -1655,7 +1655,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
chap_table->secret_len = strlen(password);
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
if (is_qla40XX(ha)) {
chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
@@ -1721,7 +1721,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
mutex_lock(&ha->chap_sem);
chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
- if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) {
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
}
@@ -1784,7 +1784,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
free_index = i;
continue;
@@ -2105,18 +2105,18 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
if (conn->max_recv_dlength)
fw_ddb_entry->iscsi_max_rcv_data_seg_len =
- __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
+ cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
if (sess->max_r2t)
fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
if (sess->first_burst)
fw_ddb_entry->iscsi_first_burst_len =
- __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
+ cpu_to_le16((sess->first_burst / BYTE_UNITS));
if (sess->max_burst)
fw_ddb_entry->iscsi_max_burst_len =
- __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
+ cpu_to_le16((sess->max_burst / BYTE_UNITS));
if (sess->time2wait)
fw_ddb_entry->iscsi_def_time2wait =
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 66a487795c53..47adff9f0506 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3658,7 +3658,7 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
"Do ROM fast read failed\n");
goto done_read;
}
- dwptr[i] = __constant_cpu_to_le32(val);
+ dwptr[i] = cpu_to_le32(val);
}
done_read:
@@ -3721,9 +3721,9 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
goto no_flash_data;
}
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
- if (flt->version != __constant_cpu_to_le16(1)) {
+ if (flt->version != cpu_to_le16(1)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -3826,7 +3826,7 @@ qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -3883,7 +3883,7 @@ qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
QLA82XX_IDC_PARAM_ADDR , 8);
- if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ if (*wptr == cpu_to_le32(0xffffffff)) {
ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
} else {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6ee7ea4c27e0..f1ea65c6e5f5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -702,7 +702,7 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
if ((*chap_entry)->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
*chap_entry = NULL;
} else {
rval = QLA_SUCCESS;
@@ -745,7 +745,7 @@ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if ((chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ cpu_to_le16(CHAP_VALID_COOKIE)) &&
(i > MAX_RESRV_CHAP_IDX)) {
free_index = i;
break;
@@ -794,7 +794,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
for (i = chap_tbl_idx; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+ cpu_to_le16(CHAP_VALID_COOKIE))
continue;
chap_rec->chap_tbl_idx = i;
@@ -923,7 +923,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
goto exit_delete_chap;
}
- chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+ chap_table->cookie = cpu_to_le16(0xFFFF);
offset = FLASH_CHAP_OFFSET |
(chap_tbl_idx * sizeof(struct ql4_chap_table));
@@ -6043,7 +6043,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
continue;
}
@@ -9282,7 +9282,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->request->timeout / HZ,
+ cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
@@ -9349,7 +9349,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+ ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index d84e218d32cb..8e7e833a36cc 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -890,7 +890,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
cmd->control_flags |= CFLAG_WRITE;
else
cmd->control_flags |= CFLAG_READ;
- cmd->time_out = Cmnd->request->timeout/HZ;
+ cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ;
memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d26025cf5de3..b241f9e3885c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -190,7 +190,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
new file mode 100644
index 000000000000..81c3853a2a80
--- /dev/null
+++ b/drivers/scsi/scsi_bsg.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bsg.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/sg.h>
+#include "scsi_priv.h"
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout)
+{
+ struct scsi_request *sreq;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
+ if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+ hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+ return -EINVAL;
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
+ rq = blk_get_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ rq->timeout = timeout;
+
+ ret = -ENOMEM;
+ sreq = scsi_req(rq);
+ sreq->cmd_len = hdr->request_len;
+ if (sreq->cmd_len > BLK_MAX_CDB) {
+ sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
+ if (!sreq->cmd)
+ goto out_put_request;
+ }
+
+ ret = -EFAULT;
+ if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
+ goto out_free_cmd;
+ ret = -EPERM;
+ if (!scsi_cmd_allowed(sreq->cmd, mode))
+ goto out_free_cmd;
+
+ ret = 0;
+ if (hdr->dout_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+ hdr->dout_xfer_len, GFP_KERNEL);
+ } else if (hdr->din_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+ hdr->din_xfer_len, GFP_KERNEL);
+ }
+
+ if (ret)
+ goto out_free_cmd;
+
+ bio = rq->bio;
+ blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+
+ /*
+ * fill in all the output members
+ */
+ hdr->device_status = sreq->result & 0xff;
+ hdr->transport_status = host_byte(sreq->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(sreq->result))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->response_len = 0;
+
+ if (sreq->sense_len && hdr->response) {
+ int len = min_t(unsigned int, hdr->max_response_len,
+ sreq->sense_len);
+
+ if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+ ret = -EFAULT;
+ else
+ hdr->response_len = len;
+ }
+
+ if (rq_data_dir(rq) == READ)
+ hdr->din_resid = sreq->resid_len;
+ else
+ hdr->dout_resid = sreq->resid_len;
+
+ blk_rq_unmap_user(bio);
+
+out_free_cmd:
+ scsi_req_free_cmd(scsi_req(rq));
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
+
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
+{
+ return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
+ dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
+}
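
User space reaches this handler through the per-device bsg node with a v4 sg_io header (guard 'Q'). A minimal sketch issuing TEST UNIT READY (the /dev/bsg path is illustrative; error handling trimmed):

#include <fcntl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[6] = { 0 };		/* TEST UNIT READY */
	unsigned char sense[32] = { 0 };
	struct sg_io_v4 hdr;
	int fd;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* path is illustrative */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (uintptr_t)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (uintptr_t)sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 5000;			/* ms */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("device_status=%u driver_status=%u\n",
		       hdr.device_status, hdr.driver_status);
	close(fd);
	return 0;
}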
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 90349498f686..6e50e81a8216 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -7,9 +7,18 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
+MODULE_LICENSE("GPL v2");
+
+/* Command group 3 is reserved and should never be used. */
+const unsigned char scsi_command_size_tbl[8] = {
+ 6, 10, 10, 12, 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10).
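
The table moved here is indexed by the CDB group code, the top three bits of the opcode; that is all the COMMAND_SIZE() lookup does with it. A stand-alone sketch:

#include <stdio.h>

/* Mirrors scsi_command_size_tbl: group 3 is reserved. */
static const unsigned char size_tbl[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

static unsigned int cdb_len(unsigned char opcode)
{
	return size_tbl[(opcode >> 5) & 7];
}

int main(void)
{
	printf("INQUIRY  (0x12): %u bytes\n", cdb_len(0x12)); /* group 0 -> 6  */
	printf("READ(10) (0x28): %u bytes\n", cdb_len(0x28)); /* group 1 -> 10 */
	printf("READ(16) (0x88): %u bytes\n", cdb_len(0x88)); /* group 4 -> 16 */
	return 0;
}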
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 58f69366bdcc..66f507469a31 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3076,6 +3076,7 @@ static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
+ int ret = 0;
unsigned int i;
sector_t sector;
struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
@@ -3083,26 +3084,33 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
- int ret;
-
sector = start_sec + i;
sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
- ei_lba);
- if (ret) {
- dif_errors++;
- return ret;
+ /*
+ * Because scsi_debug acts as both initiator and
+ * target, we proceed to verify the PI even if
+ * RDPROTECT=3. This is done so the "initiator" knows
+ * which type of error to return. Otherwise we would
+ * have to iterate over the PI twice.
+ */
+ if (scp->cmnd[1] >> 5) { /* RDPROTECT */
+ ret = dif_verify(sdt, lba2fake_store(sip, sector),
+ sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ break;
+ }
}
}
dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
- return 0;
+ return ret;
}
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -3196,12 +3204,29 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- read_unlock(macc_lckp);
- mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_read(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ read_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ }
+ break;
}
}
@@ -3232,28 +3257,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
-static void dump_sector(unsigned char *buf, int len)
-{
- int i, j, n;
-
- pr_err(">>> Sector Dump <<<\n");
- for (i = 0 ; i < len ; i += 16) {
- char b[128];
-
- for (j = 0, n = 0; j < 16; j++) {
- unsigned char c = buf[i+j];
-
- if (c >= 0x20 && c < 0x7e)
- n += scnprintf(b + n, sizeof(b) - n,
- " %c ", buf[i+j]);
- else
- n += scnprintf(b + n, sizeof(b) - n,
- "%02x ", buf[i+j]);
- }
- pr_err("%04d: %s\n", i, b);
- }
-}
-
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
@@ -3299,10 +3302,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = piter.addr + ppage_offset;
daddr = diter.addr + dpage_offset;
- ret = dif_verify(sdt, daddr, sector, ei_lba);
- if (ret) {
- dump_sector(daddr, sdebug_sector_size);
- goto out;
+ if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
+ if (ret)
+ goto out;
}
sector++;
@@ -3480,12 +3483,29 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- write_unlock(macc_lckp);
- mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_write(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ write_unlock(macc_lckp);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ }
+ break;
}
}
@@ -4702,7 +4722,7 @@ fini:
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u16 hwq;
- u32 tag = blk_mq_unique_tag(cmnd->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -4715,7 +4735,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
static u32 get_tag(struct scsi_cmnd *cmnd)
{
- return blk_mq_unique_tag(cmnd->request);
+ return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
/* Queued (deferred) command completions converge here. */
@@ -5364,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
- bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
+ bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5567,8 +5587,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
if (unlikely(sd_dp->aborted)) {
- sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
- blk_abort_request(cmnd->request);
+ sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+ scsi_cmd_to_rq(cmnd)->tag);
+ blk_abort_request(scsi_cmd_to_rq(cmnd));
atomic_set(&sdeb_inject_pending, 0);
sd_dp->aborted = false;
}
@@ -7394,7 +7415,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
(u32)cmd[k]);
}
sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
- blk_mq_unique_tag(scp->request), b);
+ blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
}
if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index d33355ab6e14..c7080454aea9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -171,6 +171,7 @@ static struct {
{"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
+ {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36},
{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 58a252c38992..b6c86cce57bf 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -242,7 +242,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
*/
static void scsi_eh_reset(struct scsi_cmnd *scmd)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_reset)
sdrv->eh_reset(scmd);
@@ -1182,7 +1182,7 @@ static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
static enum scsi_disposition
scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1750,21 +1750,23 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
*/
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
+ struct request *req = scsi_cmd_to_rq(scmd);
+
switch (host_byte(scmd->result)) {
case DID_OK:
break;
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
+ return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
case DID_PARITY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
+ return req->cmd_flags & REQ_FAILFAST_DEV;
case DID_ERROR:
if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
return 0;
fallthrough;
case DID_SOFT_ERROR:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
+ return req->cmd_flags & REQ_FAILFAST_DRIVER;
}
if (!scsi_status_is_check_condition(scmd->result))
@@ -1775,8 +1777,7 @@ check_type:
* assume caller has checked sense and determined
* the check condition was retryable.
*/
- if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- blk_rq_is_passthrough(scmd->request))
+ if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
return 1;
return 0;
@@ -2376,7 +2377,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scmd = (struct scsi_cmnd *)(rq + 1);
scsi_init_command(dev, scmd);
- scmd->request = rq;
scmd->cmnd = scsi_req(rq)->cmd;
scmd->scsi_done = scsi_reset_provider_done_command;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 0d13610cd6bf..6ff2207bd45a 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -63,29 +64,6 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
return 1;
}
-/*
-
- * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
- *
- * dev is the SCSI device struct ptr, *(int *) arg is the length of the
- * input data, if any, not including the command string & counts,
- * *((int *)arg + 1) is the output buffer size in bytes.
- *
- * *(char *) ((int *) arg)[2] the actual command byte.
- *
- * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.
- *
- * This size *does not* include the initial lengths that were passed.
- *
- * The SCSI command is read from the memory location immediately after the
- * length words, and the input data is right after the command. The SCSI
- * routines know the command size based on the opcode decode.
- *
- * The output area is then filled in starting from the command byte.
- */
-
static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
int timeout, int retries)
{
@@ -189,10 +167,732 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
? -EFAULT: 0;
}
+static int sg_get_version(int __user *p)
+{
+ static const int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
-static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
+static int sg_set_timeout(struct scsi_device *sdev, int __user *p)
{
- char scsi_cmd[MAX_COMMAND_SIZE];
+ int timeout, err = get_user(timeout, p);
+
+ if (!err)
+ sdev->sg_timeout = clock_t_to_jiffies(timeout);
+
+ return err;
+}
+
+static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+ int val = min(sdev->sg_reserved_size,
+ queue_max_bytes(sdev->request_queue));
+
+ return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+ int size, err = get_user(size, p);
+
+ if (err)
+ return err;
+
+ if (size < 0)
+ return -EINVAL;
+
+ sdev->sg_reserved_size = min_t(unsigned int, size,
+ queue_max_bytes(sdev->request_queue));
+ return 0;
+}
+
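These helpers back the classic sg ioctls, which this patch answers uniformly for SCSI devices. A hedged userspace sketch (the device path is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int main(void)
	{
		int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);	/* illustrative node */
		int ver, rsv;

		if (fd < 0)
			return 1;
		if (!ioctl(fd, SG_GET_VERSION_NUM, &ver))	/* 30527 -> 3.5.27 */
			printf("sg version %d.%d.%d\n",
			       ver / 10000, (ver / 100) % 100, ver % 100);
		if (!ioctl(fd, SG_GET_RESERVED_SIZE, &rsv))
			printf("reserved size: %d bytes\n", rsv);
		return 0;
	}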
+/*
+ * will always return that we are ATAPI even for a real SCSI drive, I'm not
+ * so sure this is worth doing anything about (why would you care??)
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+ return put_user(1, p);
+}
+
+static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp)
+{
+ struct scsi_idlun v = {
+ .dev_id = (sdev->id & 0xff) +
+ ((sdev->lun & 0xff) << 8) +
+ ((sdev->channel & 0xff) << 16) +
+ ((sdev->host->host_no & 0xff) << 24),
+ .host_unique_id = sdev->host->unique_id
+ };
+ if (copy_to_user(argp, &v, sizeof(struct scsi_idlun)))
+ return -EFAULT;
+ return 0;
+}
+
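The packed dev_id word is unpacked with the inverse shifts. A short sketch, assuming the glibc <scsi/scsi_ioctl.h> constants and declaring the structure locally, since no uapi header exports it:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_GET_IDLUN (glibc) */

	struct scsi_idlun {		/* layout mirrors the kernel structure above */
		int dev_id;
		int host_unique_id;
	};

	int main(void)
	{
		struct scsi_idlun v;
		int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);	/* illustrative */

		if (fd < 0 || ioctl(fd, SCSI_IOCTL_GET_IDLUN, &v) < 0)
			return 1;
		printf("host %d channel %d id %d lun %d\n",
		       (v.dev_id >> 24) & 0xff, (v.dev_id >> 16) & 0xff,
		       v.dev_id & 0xff, (v.dev_id >> 8) & 0xff);
		return 0;
	}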
+static int scsi_send_start_stop(struct scsi_device *sdev, int data)
+{
+ u8 cdb[MAX_COMMAND_SIZE] = { };
+
+ cdb[0] = START_STOP;
+ cdb[4] = data;
+ return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT,
+ NORMAL_RETRIES);
+}
+
+/*
+ * Check if the given command is allowed.
+ *
+ * Only a subset of commands are allowed for unprivileged users. Commands used
+ * to format the media, update the firmware, etc. are not permitted.
+ */
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
+{
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return true;
+
+ /* Anybody who can open the device can do a read-safe command */
+ switch (cmd[0]) {
+ /* Basic read-only commands */
+ case TEST_UNIT_READY:
+ case REQUEST_SENSE:
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_BUFFER:
+ case READ_DEFECT_DATA:
+ case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */
+ case READ_LONG:
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case LOG_SENSE:
+ case START_STOP:
+ case GPCMD_VERIFY_10:
+ case VERIFY_16:
+ case REPORT_LUNS:
+ case SERVICE_ACTION_IN_16:
+ case RECEIVE_DIAGNOSTIC:
+ case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */
+ case GPCMD_READ_BUFFER_CAPACITY:
+ /* Audio CD commands */
+ case GPCMD_PLAY_CD:
+ case GPCMD_PLAY_AUDIO_10:
+ case GPCMD_PLAY_AUDIO_MSF:
+ case GPCMD_PLAY_AUDIO_TI:
+ case GPCMD_PAUSE_RESUME:
+ /* CD/DVD data reading */
+ case GPCMD_READ_CD:
+ case GPCMD_READ_CD_MSF:
+ case GPCMD_READ_DISC_INFO:
+ case GPCMD_READ_DVD_STRUCTURE:
+ case GPCMD_READ_HEADER:
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ case GPCMD_READ_SUBCHANNEL:
+ case GPCMD_READ_TOC_PMA_ATIP:
+ case GPCMD_REPORT_KEY:
+ case GPCMD_SCAN:
+ case GPCMD_GET_CONFIGURATION:
+ case GPCMD_READ_FORMAT_CAPACITIES:
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ case GPCMD_GET_PERFORMANCE:
+ case GPCMD_SEEK:
+ case GPCMD_STOP_PLAY_SCAN:
+ /* ZBC */
+ case ZBC_IN:
+ return true;
+ /* Basic writing commands */
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_VERIFY:
+ case WRITE_12:
+ case WRITE_VERIFY_12:
+ case WRITE_16:
+ case WRITE_LONG:
+ case WRITE_LONG_2:
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ case WRITE_SAME_32:
+ case ERASE:
+ case GPCMD_MODE_SELECT_10:
+ case MODE_SELECT:
+ case LOG_SELECT:
+ case GPCMD_BLANK:
+ case GPCMD_CLOSE_TRACK:
+ case GPCMD_FLUSH_CACHE:
+ case GPCMD_FORMAT_UNIT:
+ case GPCMD_REPAIR_RZONE_TRACK:
+ case GPCMD_RESERVE_RZONE_TRACK:
+ case GPCMD_SEND_DVD_STRUCTURE:
+ case GPCMD_SEND_EVENT:
+ case GPCMD_SEND_OPC:
+ case GPCMD_SEND_CUE_SHEET:
+ case GPCMD_SET_SPEED:
+ case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL:
+ case GPCMD_LOAD_UNLOAD:
+ case GPCMD_SET_STREAMING:
+ case GPCMD_SET_READ_AHEAD:
+ /* ZBC */
+ case ZBC_OUT:
+ return (mode & FMODE_WRITE);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(scsi_cmd_allowed);
+
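One observable effect of the filter: on a file handle opened read-only, and without CAP_SYS_RAWIO, a write-class opcode fails with EPERM before it reaches the device. A sketch under those assumptions (run unprivileged; the node is illustrative):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int main(void)
	{
		unsigned char cdb[10] = { 0x2a };	/* WRITE(10), zero blocks */
		unsigned char sense[32];
		struct sg_io_hdr io;
		int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

		if (fd < 0)
			return 1;
		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.dxfer_direction = SG_DXFER_NONE;
		io.cmd_len = sizeof(cdb);
		io.cmdp = cdb;
		io.sbp = sense;
		io.mx_sb_len = sizeof(sense);
		io.timeout = 1000;	/* ms */
		if (ioctl(fd, SG_IO, &io) < 0 && errno == EPERM)
			puts("WRITE(10) rejected on a read-only fd, as expected");
		return 0;
	}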
+static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ struct scsi_request *req = scsi_req(rq);
+
+ if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (!scsi_cmd_allowed(req->cmd, mode))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ req->cmd_len = hdr->cmd_len;
+
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = sdev->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ struct scsi_request *req = scsi_req(rq);
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = req->result & 0xff;
+ hdr->masked_status = status_byte(req->result);
+ hdr->msg_status = COMMAND_COMPLETE;
+ hdr->host_status = host_byte(req->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(hdr->status))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = req->resid_len;
+ hdr->sb_len_wr = 0;
+
+ if (req->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
+
+ if (!copy_to_user(hdr->sbp, req->sense, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ r = blk_rq_unmap_user(bio);
+ if (!ret)
+ ret = r;
+
+ return ret;
+}
+
+static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ unsigned long start_time;
+ ssize_t ret = 0;
+ int writing = 0;
+ int at_head = 0;
+ struct request *rq;
+ struct scsi_request *req;
+ struct bio *bio;
+
+ if (hdr->interface_id != 'S')
+ return -EINVAL;
+
+ if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9))
+ return -EIO;
+
+ if (hdr->dxfer_len)
+ switch (hdr->dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ writing = 1;
+ break;
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ break;
+ }
+ if (hdr->flags & SG_FLAG_Q_AT_HEAD)
+ at_head = 1;
+
+ ret = -ENOMEM;
+ rq = blk_get_request(sdev->request_queue, writing ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ req = scsi_req(rq);
+
+ if (hdr->cmd_len > BLK_MAX_CDB) {
+ req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+ if (!req->cmd)
+ goto out_put_request;
+ }
+
+ ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
+ if (ret < 0)
+ goto out_free_cdb;
+
+ ret = 0;
+ if (hdr->iovec_count) {
+ struct iov_iter i;
+ struct iovec *iov = NULL;
+
+ ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
+ hdr->iovec_count, 0, &iov, &i);
+ if (ret < 0)
+ goto out_free_cdb;
+
+ /* SG_IO howto says that the shorter of the two wins */
+ iov_iter_truncate(&i, hdr->dxfer_len);
+
+ ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
+ kfree(iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
+ hdr->dxfer_len, GFP_KERNEL);
+
+ if (ret)
+ goto out_free_cdb;
+
+ bio = rq->bio;
+ req->retries = 0;
+
+ start_time = jiffies;
+
+ blk_execute_rq(disk, rq, at_head);
+
+ hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+ ret = scsi_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+ scsi_req_free_cmd(req);
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
+
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @q: request queue to send scsi commands down
+ * @disk: gendisk to operate on (optional)
+ * @mode: mode used to open the file through which the ioctl has been
+ * submitted
+ * @sic: userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q. @mode is used to perform fine-grained
+ * permission checks that allow users to send down non-destructive
+ * SCSI commands. If the caller has a struct gendisk available it
+ * should be passed in as @disk to allow the low level driver to use
+ * the information contained in it. A NULL @disk is only allowed if
+ * the caller knows that the low level driver doesn't need it (e.g. in
+ * the scsi subsystem).
+ *
+ * Notes:
+ * - This interface is deprecated - users should use the SG_IO
+ * interface instead, as this is a more flexible approach to
+ * performing SCSI commands on a device.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE
+ * - The length (inlen + outlen) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ */
+static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
+ fmode_t mode, struct scsi_ioctl_command __user *sic)
+{
+ enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
+ struct request *rq;
+ struct scsi_request *req;
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL;
+
+ if (!sic)
+ return -EINVAL;
+
+ /*
+ * get in and out lengths, verify they don't exceed a page worth of data
+ */
+ if (get_user(in_len, &sic->inlen))
+ return -EFAULT;
+ if (get_user(out_len, &sic->outlen))
+ return -EFAULT;
+ if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+ return -EINVAL;
+ if (get_user(opcode, sic->data))
+ return -EFAULT;
+
+ bytes = max(in_len, out_len);
+ if (bytes) {
+ buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
+ if (!buffer)
+ return -ENOMEM;
+ }
+
+ rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto error_free_buffer;
+ }
+ req = scsi_req(rq);
+
+ cmdlen = COMMAND_SIZE(opcode);
+
+ /*
+ * get command and data to send to device, if any
+ */
+ err = -EFAULT;
+ req->cmd_len = cmdlen;
+ if (copy_from_user(req->cmd, sic->data, cmdlen))
+ goto error;
+
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+ err = -EPERM;
+ if (!scsi_cmd_allowed(req->cmd, mode))
+ goto error;
+
+ /* default; possibly overridden later */
+ req->retries = 5;
+
+ switch (opcode) {
+ case SEND_DIAGNOSTIC:
+ case FORMAT_UNIT:
+ rq->timeout = FORMAT_UNIT_TIMEOUT;
+ req->retries = 1;
+ break;
+ case START_STOP:
+ rq->timeout = START_STOP_TIMEOUT;
+ break;
+ case MOVE_MEDIUM:
+ rq->timeout = MOVE_MEDIUM_TIMEOUT;
+ break;
+ case READ_ELEMENT_STATUS:
+ rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ break;
+ case READ_DEFECT_DATA:
+ rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+ req->retries = 1;
+ break;
+ default:
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ break;
+ }
+
+ if (bytes) {
+ err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+ if (err)
+ goto error;
+ }
+
+ blk_execute_rq(disk, rq, 0);
+
+ err = req->result & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (req->sense_len && req->sense) {
+ bytes = (OMAX_SB_LEN > req->sense_len) ?
+ req->sense_len : OMAX_SB_LEN;
+ if (copy_to_user(sic->data, req->sense, bytes))
+ err = -EFAULT;
+ }
+ } else {
+ if (copy_to_user(sic->data, buffer, out_len))
+ err = -EFAULT;
+ }
+
+error:
+ blk_put_request(rq);
+
+error_free_buffer:
+ kfree(buffer);
+
+ return err;
+}
+
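The layout the notes describe is just two length words followed by the CDB and any input payload. A minimal sketch for TEST UNIT READY, which moves no data in either direction (node name assumed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_SEND_COMMAND (glibc) */

	int main(void)
	{
		struct {
			unsigned int inlen;	/* bytes sent to the device */
			unsigned int outlen;	/* bytes expected back */
			unsigned char data[6];	/* CDB (+ input data, if any) */
		} cmd = { 0, 0, { 0x00 } };	/* TEST UNIT READY: group 0 -> 6-byte CDB */
		int fd = open("/dev/sda", O_RDONLY);	/* illustrative */

		if (fd < 0)
			return 1;
		/* Returns 0 on success, the compacted SCSI result otherwise. */
		return ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &cmd);
	}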
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_sg_io_hdr hdr32 = {
+ .interface_id = hdr->interface_id,
+ .dxfer_direction = hdr->dxfer_direction,
+ .cmd_len = hdr->cmd_len,
+ .mx_sb_len = hdr->mx_sb_len,
+ .iovec_count = hdr->iovec_count,
+ .dxfer_len = hdr->dxfer_len,
+ .dxferp = (uintptr_t)hdr->dxferp,
+ .cmdp = (uintptr_t)hdr->cmdp,
+ .sbp = (uintptr_t)hdr->sbp,
+ .timeout = hdr->timeout,
+ .flags = hdr->flags,
+ .pack_id = hdr->pack_id,
+ .usr_ptr = (uintptr_t)hdr->usr_ptr,
+ .status = hdr->status,
+ .masked_status = hdr->masked_status,
+ .msg_status = hdr->msg_status,
+ .sb_len_wr = hdr->sb_len_wr,
+ .host_status = hdr->host_status,
+ .driver_status = hdr->driver_status,
+ .resid = hdr->resid,
+ .duration = hdr->duration,
+ .info = hdr->info,
+ };
+
+ if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+
+ if (copy_to_user(argp, hdr, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(put_sg_io_hdr);
+
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ struct compat_sg_io_hdr hdr32;
+
+ if (in_compat_syscall()) {
+ if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
+ return -EFAULT;
+
+ *hdr = (struct sg_io_hdr) {
+ .interface_id = hdr32.interface_id,
+ .dxfer_direction = hdr32.dxfer_direction,
+ .cmd_len = hdr32.cmd_len,
+ .mx_sb_len = hdr32.mx_sb_len,
+ .iovec_count = hdr32.iovec_count,
+ .dxfer_len = hdr32.dxfer_len,
+ .dxferp = compat_ptr(hdr32.dxferp),
+ .cmdp = compat_ptr(hdr32.cmdp),
+ .sbp = compat_ptr(hdr32.sbp),
+ .timeout = hdr32.timeout,
+ .flags = hdr32.flags,
+ .pack_id = hdr32.pack_id,
+ .usr_ptr = compat_ptr(hdr32.usr_ptr),
+ .status = hdr32.status,
+ .masked_status = hdr32.masked_status,
+ .msg_status = hdr32.msg_status,
+ .sb_len_wr = hdr32.sb_len_wr,
+ .host_status = hdr32.host_status,
+ .driver_status = hdr32.driver_status,
+ .resid = hdr32.resid,
+ .duration = hdr32.duration,
+ .info = hdr32.info,
+ };
+
+ return 0;
+ }
+#endif
+
+ if (copy_from_user(hdr, argp, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(get_sg_io_hdr);
+
+#ifdef CONFIG_COMPAT
+struct compat_cdrom_generic_command {
+ unsigned char cmd[CDROM_PACKET_SIZE];
+ compat_caddr_t buffer;
+ compat_uint_t buflen;
+ compat_int_t stat;
+ compat_caddr_t sense;
+ unsigned char data_direction;
+ unsigned char pad[3];
+ compat_int_t quiet;
+ compat_int_t timeout;
+ compat_caddr_t unused;
+};
+#endif
+
+static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
+ const void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32;
+
+ if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
+ return -EFAULT;
+
+ *cgc = (struct cdrom_generic_command) {
+ .buffer = compat_ptr(cgc32.buffer),
+ .buflen = cgc32.buflen,
+ .stat = cgc32.stat,
+ .sense = compat_ptr(cgc32.sense),
+ .data_direction = cgc32.data_direction,
+ .quiet = cgc32.quiet,
+ .timeout = cgc32.timeout,
+ .unused = compat_ptr(cgc32.unused),
+ };
+ memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
+ return 0;
+ }
+#endif
+ if (copy_from_user(cgc, arg, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
+ void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32 = {
+ .buffer = (uintptr_t)(cgc->buffer),
+ .buflen = cgc->buflen,
+ .stat = cgc->stat,
+ .sense = (uintptr_t)(cgc->sense),
+ .data_direction = cgc->data_direction,
+ .quiet = cgc->quiet,
+ .timeout = cgc->timeout,
+ .unused = (uintptr_t)(cgc->unused),
+ };
+ memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
+
+ if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+ if (copy_to_user(arg, cgc, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
+ fmode_t mode, void __user *arg)
+{
+ struct cdrom_generic_command cgc;
+ struct sg_io_hdr hdr;
+ int err;
+
+ err = scsi_get_cdrom_generic_arg(&cgc, arg);
+ if (err)
+ return err;
+
+ cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.interface_id = 'S';
+ hdr.cmd_len = sizeof(cgc.cmd);
+ hdr.dxfer_len = cgc.buflen;
+ switch (cgc.data_direction) {
+ case CGC_DATA_UNKNOWN:
+ hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+ break;
+ case CGC_DATA_WRITE:
+ hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ break;
+ case CGC_DATA_READ:
+ hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ break;
+ case CGC_DATA_NONE:
+ hdr.dxfer_direction = SG_DXFER_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hdr.dxferp = cgc.buffer;
+ hdr.sbp = cgc.sense;
+ if (hdr.sbp)
+ hdr.mx_sb_len = sizeof(struct request_sense);
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
+ hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
+ hdr.cmd_len = sizeof(cgc.cmd);
+
+ err = sg_io(sdev, disk, &hdr, mode);
+ if (err == -EFAULT)
+ return -EFAULT;
+
+ if (hdr.status)
+ return -EIO;
+
+ cgc.stat = err;
+ cgc.buflen = hdr.resid;
+ if (scsi_put_cdrom_generic_arg(&cgc, arg))
+ return -EFAULT;
+
+ return err;
+}
+
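For reference, the translated interface is driven like this from userspace; a hedged sketch reading the TOC header from an assumed /dev/sr0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/cdrom.h>

	int main(void)
	{
		unsigned char buf[12];
		struct cdrom_generic_command cgc;
		int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);	/* illustrative */

		if (fd < 0)
			return 1;
		memset(&cgc, 0, sizeof(cgc));
		cgc.cmd[0] = 0x43;			/* READ TOC/PMA/ATIP */
		cgc.cmd[7] = sizeof(buf) >> 8;		/* allocation length */
		cgc.cmd[8] = sizeof(buf) & 0xff;
		cgc.buffer = buf;
		cgc.buflen = sizeof(buf);
		cgc.data_direction = CGC_DATA_READ;	/* becomes SG_DXFER_FROM_DEV above */
		cgc.timeout = 500;			/* USER_HZ ticks; clock_t_to_jiffies() above */
		if (ioctl(fd, CDROM_SEND_PACKET, &cgc) < 0)
			return 1;
		printf("first track %u, last track %u\n", buf[2], buf[3]);
		return 0;
	}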
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
+ fmode_t mode, void __user *argp)
+{
+ struct sg_io_hdr hdr;
+ int error;
+
+ error = get_sg_io_hdr(&hdr, argp);
+ if (error)
+ return error;
+ error = sg_io(sdev, disk, &hdr, mode);
+ if (error == -EFAULT)
+ return error;
+ if (put_sg_io_hdr(&hdr, argp))
+ return -EFAULT;
+ return error;
+}
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @disk: disk receiving the ioctl
+ * @mode: mode the block/char device is opened with
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
+ int cmd, void __user *arg)
+{
+ struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
/* Check for deprecated ioctls ... all the ioctls which don't
@@ -212,26 +912,34 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
}
switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN: {
- struct scsi_idlun v = {
- .dev_id = (sdev->id & 0xff)
- + ((sdev->lun & 0xff) << 8)
- + ((sdev->channel & 0xff) << 16)
- + ((sdev->host->host_no & 0xff) << 24),
- .host_unique_id = sdev->host->unique_id
- };
- if (copy_to_user(arg, &v, sizeof(struct scsi_idlun)))
- return -EFAULT;
- return 0;
- }
+ case SG_GET_VERSION_NUM:
+ return sg_get_version(arg);
+ case SG_SET_TIMEOUT:
+ return sg_set_timeout(sdev, arg);
+ case SG_GET_TIMEOUT:
+ return jiffies_to_clock_t(sdev->sg_timeout);
+ case SG_GET_RESERVED_SIZE:
+ return sg_get_reserved_size(sdev, arg);
+ case SG_SET_RESERVED_SIZE:
+ return sg_set_reserved_size(sdev, arg);
+ case SG_EMULATED_HOST:
+ return sg_emulated_host(q, arg);
+ case SG_IO:
+ return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ return sg_scsi_ioctl(q, disk, mode, arg);
+ case CDROM_SEND_PACKET:
+ return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+ case CDROMCLOSETRAY:
+ return scsi_send_start_stop(sdev, 3);
+ case CDROMEJECT:
+ return scsi_send_start_stop(sdev, 2);
+ case SCSI_IOCTL_GET_IDLUN:
+ return scsi_get_idlun(sdev, arg);
case SCSI_IOCTL_GET_BUS_NUMBER:
return put_user(sdev->host->host_no, (int __user *)arg);
case SCSI_IOCTL_PROBE_HOST:
return ioctl_probe(sdev->host, arg);
- case SCSI_IOCTL_SEND_COMMAND:
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:
@@ -240,66 +948,27 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
NORMAL_RETRIES, &sense_hdr);
case SCSI_IOCTL_START_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 1;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 1);
case SCSI_IOCTL_STOP_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 0;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 0);
case SCSI_IOCTL_GET_PCI:
return scsi_ioctl_get_pci(sdev, arg);
case SG_SCSI_RESET:
return scsi_ioctl_reset(sdev, arg);
}
- return -ENOIOCTLCMD;
-}
-
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->ioctl)
- return sdev->host->hostt->ioctl(sdev, cmd, arg);
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(scsi_ioctl);
#ifdef CONFIG_COMPAT
-int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->compat_ioctl)
+ if (in_compat_syscall()) {
+ if (!sdev->host->hostt->compat_ioctl)
+ return -EINVAL;
return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
-
- return ret;
-}
-EXPORT_SYMBOL(scsi_compat_ioctl);
+ }
#endif
+ if (!sdev->host->hostt->ioctl)
+ return -EINVAL;
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_ioctl);
/*
* We can process a reset even when a device isn't fully operable.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7456a26aef51..572673873ddf 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -119,13 +119,15 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
- if (cmd->request->rq_flags & RQF_DONTPREP) {
- cmd->request->rq_flags &= ~RQF_DONTPREP;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+
+ if (rq->rq_flags & RQF_DONTPREP) {
+ rq->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(cmd->request, true);
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -164,7 +166,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
*/
cmd->result = 0;
- blk_mq_requeue_request(cmd->request, true);
+ blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
}
/**
@@ -478,7 +480,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -624,7 +626,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
unsigned long wait_for;
if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
@@ -643,7 +645,7 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
@@ -818,7 +820,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
{
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
struct scsi_sense_hdr sshdr;
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -907,7 +909,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
if (unlikely(result)) /* a nz result may or may not be an error */
@@ -978,7 +980,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
struct scatterlist *last_sg = NULL;
blk_status_t ret;
@@ -1083,8 +1085,13 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_request *req = &cmd->req;
+
+ memset(req->__cmd, 0, sizeof(req->__cmd));
+ req->cmd = req->__cmd;
+ req->cmd_len = BLK_MAX_CDB;
+ req->sense_len = 0;
- scsi_req_init(&cmd->req);
init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
@@ -1107,7 +1114,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
void *prot = cmd->prot_sdb;
- struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries, to_clear;
@@ -1533,8 +1540,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
- cmd->request = req;
- cmd->tag = req->tag;
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1572,12 +1577,12 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
- if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+ if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request);
+ blk_mq_complete_request(scsi_cmd_to_rq(cmd));
}
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 2317717935e9..ed9572252a42 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -28,8 +28,9 @@ static void scsi_log_release_buffer(char *bufptr)
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
- return scmd->request->rq_disk ?
- scmd->request->rq_disk->disk_name : NULL;
+ struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+
+ return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
@@ -91,7 +92,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scmd->request->tag);
+ scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -188,7 +189,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
return;
off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
@@ -210,7 +211,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
off = sdev_format_header(logbuf, logbuf_len,
scmd_name(cmd),
- cmd->request->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (!WARN_ON(off > logbuf_len - 58)) {
off += scnprintf(logbuf + off, logbuf_len - off,
"CDB[%02x]: ", k);
@@ -373,7 +374,8 @@ EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(const struct scsi_cmnd *cmd)
{
- scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+ scsi_log_print_sense(cmd->device, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -391,8 +393,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (!logbuf)
return;
- off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index eae2235f79b5..6d9152031a40 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -7,6 +7,7 @@
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
+struct bsg_device;
struct request_queue;
struct request;
struct scsi_cmnd;
@@ -180,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev);
+
extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 5b6996a2401b..fe22191522a3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -267,6 +267,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
+ sdev->sg_reserved_size = INT_MAX;
+
q = blk_mq_init_queue(&sdev->host->tag_set);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
@@ -974,6 +976,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_UNMAP_LIMIT_WS)
sdev->unmap_limit_for_ws = 1;
+ if (*bflags & BLIST_IGN_MEDIA_CHANGE)
+ sdev->ignore_media_change = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index c0d31119d6d7..86793259e541 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/bsg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1333,7 +1334,6 @@ static int scsi_target_add(struct scsi_target *starget)
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
- struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
error = scsi_target_add(starget);
@@ -1372,12 +1372,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
- error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
- if (error)
- /* we're treating error on bsg register as non-fatal,
- * so pretend nothing went wrong */
- sdev_printk(KERN_INFO, sdev,
- "Failed to register bsg queue, errno=%d\n", error);
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
+ sdev->bsg_dev = scsi_bsg_register_queue(sdev);
+ if (IS_ERR(sdev->bsg_dev)) {
+ /*
+ * We're treating error on bsg register as non-fatal, so
+ * pretend nothing went wrong.
+ */
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to register bsg queue, errno=%d\n",
+ (int)PTR_ERR(sdev->bsg_dev));
+ sdev->bsg_dev = NULL;
+ }
+ }
/* add additional host specific attributes */
if (sdev->host->hostt->sdev_attrs) {
@@ -1439,7 +1446,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
sysfs_remove_groups(&sdev->sdev_gendev.kobj,
sdev->host->hostt->sdev_groups);
- bsg_unregister_queue(sdev->request_queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
+ bsg_unregister_queue(sdev->bsg_dev);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
device_del(dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 49748cd817a5..60e406bcf42a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3804,7 +3804,7 @@ bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
- (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
+ (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
return false;
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5af7a10e9514..bd72c38d7bfc 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1230,7 +1230,7 @@ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
{
if (cmd->flags & SCMD_TAGGED) {
*msg++ = SIMPLE_QUEUE_TAG;
- *msg++ = cmd->request->tag;
+ *msg++ = scsi_cmd_to_rq(cmd)->tag;
return 2;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 610ebba0d66e..cbd9999f93a6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -110,6 +110,7 @@ static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
+static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
@@ -605,7 +606,7 @@ static const struct dev_pm_ops sd_pm_ops = {
.poweroff = sd_suspend_system,
.restore = sd_resume,
.runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume,
+ .runtime_resume = sd_resume_runtime,
};
static struct scsi_driver sd_template = {
@@ -776,8 +777,9 @@ static unsigned int sd_prot_flag_mask(unsigned int prot_op)
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
- struct bio *bio = scmd->request->bio;
- unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ struct bio *bio = rq->bio;
+ unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
unsigned int protect = 0;
if (dix) { /* DIX Type 0, 1, 2, 3 */
@@ -868,7 +870,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -904,7 +906,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -936,7 +938,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -966,7 +968,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1063,7 +1065,7 @@ out:
**/
static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
struct bio *bio = rq->bio;
@@ -1112,7 +1114,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
/* flush requests don't perform I/O, zero the S/G table */
@@ -1210,7 +1212,7 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1324,7 +1326,7 @@ fail:
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
switch (req_op(rq)) {
case REQ_OP_DISCARD:
@@ -1370,7 +1372,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
- struct request *rq = SCpnt->request;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1530,11 +1532,11 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
/**
- * sd_ioctl_common - process an ioctl
+ * sd_ioctl - process an ioctl
* @bdev: target block device
* @mode: FMODE_* mask
* @cmd: ioctl command number
- * @p: this is third argument given to ioctl(2) system call.
+ * @arg: the third argument given to the ioctl(2) system call.
* Often contains a pointer.
*
* Returns 0 if successful (some ioctls return positive numbers on
@@ -1543,20 +1545,20 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 * Note: most ioctls are forwarded onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, void __user *p)
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
+ void __user *p = (void __user *)arg;
int error;
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
"cmd=0x%x\n", disk->disk_name, cmd));
- error = scsi_verify_blk_ioctl(bdev, cmd);
- if (error < 0)
- return error;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -1567,27 +1569,11 @@ static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
- goto out;
+ return error;
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to block level and then onto mid level if they can't be
- * resolved.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- error = scsi_ioctl(sdp, cmd, p);
- break;
- default:
- error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
- break;
- }
-out:
- return error;
+ return scsi_ioctl(sdp, disk, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1770,34 +1756,6 @@ static void sd_rescan(struct device *dev)
sd_revalidate_disk(sdkp->disk);
}
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-
-#ifdef CONFIG_COMPAT
-static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- void __user *p = compat_ptr(arg);
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-#endif
-
static char sd_pr_type(enum pr_type type)
{
switch (type) {
@@ -1898,9 +1856,7 @@ static const struct block_device_operations sd_fops = {
.release = sd_release,
.ioctl = sd_ioctl,
.getgeo = sd_getgeo,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sd_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sd_check_events,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
@@ -1921,7 +1877,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1941,7 +1897,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -1982,7 +1938,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- struct request *req = scmd->request;
+ struct request *req = scsi_cmd_to_rq(scmd);
struct scsi_device *sdev = scmd->device;
unsigned int transferred, good_bytes;
u64 start_lba, end_lba, bad_lba;
@@ -2037,8 +1993,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int sector_size = SCpnt->device->sector_size;
unsigned int resid;
struct scsi_sense_hdr sshdr;
- struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
- struct request *req = SCpnt->request;
+ struct request *req = scsi_cmd_to_rq(SCpnt);
+ struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
int sense_valid = 0;
int sense_deferred = 0;
@@ -2181,8 +2137,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* doesn't have any media in it, don't bother
* with any more polling.
*/
- if (media_not_present(sdkp, &sshdr))
+ if (media_not_present(sdkp, &sshdr)) {
+ sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
return;
+ }
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
@@ -3718,6 +3676,25 @@ static int sd_resume(struct device *dev)
return ret;
}
+static int sd_resume_runtime(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (sdp->ignore_media_change) {
+ /* clear the device's sense data */
+ static const u8 cmd[10] = { REQUEST_SENSE };
+
+ if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
+ NULL, sdp->request_queue->rq_timeout, 1, 0,
+ RQF_PM, NULL))
+ sd_printk(KERN_NOTICE, sdkp,
+ "Failed to clear sense data\n");
+ }
+
+ return sd_resume(dev);
+}
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 186b5ff52c3a..b9757f24b0d6 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -243,7 +243,7 @@ out:
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
@@ -321,7 +321,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
unsigned int nr_blocks)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
unsigned int wp_offset, zno = blk_rq_zone_no(rq);
unsigned long flags;
@@ -386,7 +386,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
@@ -442,7 +442,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
unsigned int good_bytes)
{
int result = cmd->result;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
unsigned int zno = blk_rq_zone_no(rq);
enum req_opf op = req_op(rq);
@@ -516,7 +516,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
if (op_is_zone_mgmt(req_op(rq)) &&
result &&
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d5889b4f0fd4..8f05248920e8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -237,8 +237,9 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
-
- return blk_verify_command(cmd, filp->f_mode);
+ if (!scsi_cmd_allowed(cmd, filp->f_mode))
+ return -EPERM;
+ return 0;
}
static int
@@ -1107,7 +1108,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
+ return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1163,28 +1164,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
-
- return scsi_ioctl(sdp->device, cmd_in, p);
-}
-
-#ifdef CONFIG_COMPAT
-static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = compat_ptr(arg);
- Sg_device *sdp;
- Sg_fd *sfp;
- int ret;
-
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
-
- ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- return scsi_compat_ioctl(sdp->device, cmd_in, p);
+ return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
}
-#endif
static __poll_t
sg_poll(struct file *filp, poll_table * wait)
@@ -1439,9 +1420,7 @@ static const struct file_operations sg_fops = {
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sg_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
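
Dropping sg_compat_ioctl() is safe because sg_ioctl() interprets its argument purely as a pointer; compat_ptr_ioctl is the generic VFS helper for exactly that case. Roughly, per its fs/ioctl.c definition:

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}

compat_ptr() widens the 32-bit user pointer correctly (including the s390 quirk), so no per-driver translation remains.
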
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index cb9e4e968b60..6f83e2df4d64 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
@@ -38,14 +38,14 @@
# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
config SCSI_SMARTPQI
- tristate "Microsemi PQI Driver"
+ tristate "Microchip PQI Driver"
depends on PCI && SCSI && !S390
select SCSI_SAS_ATTRS
select RAID_ATTRS
help
- This driver supports Microsemi PQI controllers.
+ This driver supports Microchip PQI controllers.
- <http://www.microsemi.com>
+ <http://www.microchip.com>
To compile this driver as a module, choose M here: the
module will be called smartpqi.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index d7dac5572274..70eca203d72f 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -59,7 +59,7 @@ struct pqi_device_registers {
/*
* controller registers
*
- * These are defined by the Microsemi implementation.
+ * These are defined by the Microchip implementation.
*
* Some registers (those named sis_*) are only used when in
* legacy SIS mode before we transition the controller into
@@ -415,7 +415,7 @@ struct pqi_event_config {
u8 reserved[2];
u8 num_event_descriptors;
u8 reserved1;
- struct pqi_event_descriptor descriptors[1];
+ struct pqi_event_descriptor descriptors[];
};
#define PQI_MAX_EVENT_DESCRIPTORS 255
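
Converting descriptors[1] to a flexible array member pairs with the struct_size() change in smartpqi_init.c below. A sketch of the equivalence, assuming the <linux/overflow.h> semantics:

/* struct_size((struct pqi_event_config *)0, descriptors, n) evaluates,
 * barring overflow, to the value the old macro spelled out by hand:
 */
size_t len = offsetof(struct pqi_event_config, descriptors) +
	     n * sizeof(struct pqi_event_descriptor);
/* on arithmetic overflow it saturates to SIZE_MAX instead of wrapping */

Using offsetof() rather than sizeof() already avoided counting the old dummy element; struct_size() keeps that behaviour while adding overflow protection.
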
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index dcc0b9618a64..ecb2af3f43ca 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,13 +33,13 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.8-045"
+#define DRIVER_VERSION "2.1.10-020"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 45
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 20
-#define DRIVER_NAME "Microsemi PQI Driver (v" \
+#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
@@ -48,8 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
-MODULE_AUTHOR("Microsemi");
-MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+MODULE_AUTHOR("Microchip");
+MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
@@ -1322,6 +1322,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
"requested %u bytes, received %u bytes\n",
raid_map_size,
get_unaligned_le32(&raid_map->structure_size));
+ rc = -EINVAL;
goto error;
}
}
@@ -4740,8 +4741,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
}
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
- (offsetof(struct pqi_event_config, descriptors) + \
- (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+ struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
bool enable_events)
@@ -5568,7 +5568,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
{
u16 hw_queue;
- hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
if (hw_queue > ctrl_info->max_hw_queue_index)
hw_queue = 0;
@@ -5577,7 +5577,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
{
- if (blk_rq_is_passthrough(scmd->request))
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
return false;
return scmd->SCp.this_residual == 0;
@@ -6033,8 +6033,10 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d\n",
- shost->host_no, device->bus, device->target, device->lun);
+ "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
+ shost->host_no,
+ device->bus, device->target, device->lun,
+ scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -7758,11 +7760,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_init_operational_queues(ctrl_info);
- rc = pqi_request_irqs(ctrl_info);
+ rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
- rc = pqi_create_queues(ctrl_info);
+ rc = pqi_request_irqs(ctrl_info);
if (rc)
return rc;
@@ -8451,7 +8453,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
if (id->driver_data)
ctrl_description = (char *)id->driver_data;
else
- ctrl_description = "Microsemi Smart Family Controller";
+ ctrl_description = "Microchip Smart Family Controller";
dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
@@ -8713,6 +8715,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1108)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1109)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9173,6 +9183,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1dfc, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5445)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5446)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5447)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b27)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b29)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b45)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dd628cc87f78..afd9bafebd1d 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index c954620628e0..d63c46a8e38b 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 12cd2ab1aead..d29c1352a826 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 6dd0ff188bb4..43a950185e24 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -33,7 +33,7 @@
#include "snic_io.h"
#include "snic.h"
-#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
+#define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag)
const char *snic_state_str[] = {
[SNIC_INIT] = "SNIC_INIT",
@@ -1636,7 +1636,7 @@ snic_abort_cmd(struct scsi_cmnd *sc)
u32 start_time = jiffies;
SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request, tag);
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);
if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
SNIC_HOST_ERR(snic->shost,
@@ -2152,7 +2152,7 @@ snic_device_reset(struct scsi_cmnd *sc)
int dr_supp = 0;
SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc));
dr_supp = snic_dev_reset_supported(sc->device);
if (!dr_supp) {
@@ -2335,7 +2335,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
spin_lock_irqsave(&snic->snic_lock, flags);
if (snic_get_state(snic) == SNIC_FWRESET) {
spin_unlock_irqrestore(&snic->snic_lock, flags);
- SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");
+ SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
msleep(SNIC_HOST_RESET_TIMEOUT);
ret = SUCCESS;
@@ -2383,11 +2383,11 @@ snic_host_reset(struct scsi_cmnd *sc)
{
struct Scsi_Host *shost = sc->device->host;
u32 start_time = jiffies;
- int ret = FAILED;
+ int ret;
SNIC_SCSI_DBG(shost,
"host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc), CMD_FLAGS(sc));
ret = snic_reset(shost, sc);
@@ -2494,7 +2494,7 @@ cleanup:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
SNIC_HOST_INFO(snic->shost,
"sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
- sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+ sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
jiffies_to_msecs(jiffies - st_time));
/* Update IO stats */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 2942a4ec9bdd..8b17b35283aa 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -122,6 +122,8 @@ static void get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense);
static const struct cdrom_device_ops sr_dops = {
.open = sr_open,
@@ -135,8 +137,9 @@ static const struct cdrom_device_ops sr_dops = {
.get_mcn = sr_get_mcn,
.reset = sr_reset,
.audio_ioctl = sr_audio_ioctl,
- .capability = SR_CAPABILITIES,
.generic_packet = sr_packet,
+ .read_cdda_bpc = sr_read_cdda_bpc,
+ .capability = SR_CAPABILITIES,
};
static void sr_kref_release(struct kref *kref);
@@ -330,7 +333,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int good_bytes = (result == 0 ? this_count : 0);
int block_sectors = 0;
long error_sector;
- struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
+ struct scsi_cd *cd = scsi_cd(rq->rq_disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -352,16 +356,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
break;
error_sector =
get_unaligned_be32(&SCpnt->sense_buffer[3]);
- if (SCpnt->request->bio != NULL)
- block_sectors =
- bio_sectors(SCpnt->request->bio);
+ if (rq->bio != NULL)
+ block_sectors = bio_sectors(rq->bio);
if (block_sectors < 4)
block_sectors = 4;
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector -
- blk_rq_pos(SCpnt->request)) << 9;
+ good_bytes = (error_sector - blk_rq_pos(rq)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -393,7 +395,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
{
int block = 0, this_count, s_size;
struct scsi_cd *cd;
- struct request *rq = SCpnt->request;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
blk_status_t ret;
ret = scsi_alloc_sgtables(SCpnt);
@@ -558,53 +560,14 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_cd *cd = scsi_cd(disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
- mutex_lock(&cd->lock);
-
- ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
- if (ret)
- goto out;
-
- scsi_autopm_get_device(sdev);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_ioctl(sdev, cmd, argp);
- goto put;
- }
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_ioctl(sdev, cmd, argp);
-
-put:
- scsi_autopm_put_device(sdev);
-
-out:
- mutex_unlock(&cd->lock);
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
- unsigned long arg)
-{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
- struct scsi_device *sdev = cd->device;
- void __user *argp = compat_ptr(arg);
- int ret;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
mutex_lock(&cd->lock);
@@ -615,32 +578,19 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
scsi_autopm_get_device(sdev);
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_compat_ioctl(sdev, cmd, argp);
- goto put;
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+ goto put;
}
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_compat_ioctl(sdev, cmd, argp);
+ ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
-
out:
mutex_unlock(&cd->lock);
return ret;
-
}
-#endif
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
@@ -665,9 +615,7 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sr_block_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
};
@@ -1008,6 +956,57 @@ static int sr_packet(struct cdrom_device_info *cdi,
return cgc->stat;
}
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense)
+{
+ struct gendisk *disk = cdi->disk;
+ u32 len = nr * CD_FRAMESIZE_RAW;
+ struct scsi_request *req;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
+ rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ req = scsi_req(rq);
+
+ ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
+ if (ret)
+ goto out_put_request;
+
+ req->cmd[0] = GPCMD_READ_CD;
+ req->cmd[1] = 1 << 2;
+ req->cmd[2] = (lba >> 24) & 0xff;
+ req->cmd[3] = (lba >> 16) & 0xff;
+ req->cmd[4] = (lba >> 8) & 0xff;
+ req->cmd[5] = lba & 0xff;
+ req->cmd[6] = (nr >> 16) & 0xff;
+ req->cmd[7] = (nr >> 8) & 0xff;
+ req->cmd[8] = nr & 0xff;
+ req->cmd[9] = 0xf8;
+ req->cmd_len = 12;
+ rq->timeout = 60 * HZ;
+ bio = rq->bio;
+
+ blk_execute_rq(disk, rq, 0);
+ if (scsi_req(rq)->result) {
+ struct scsi_sense_hdr sshdr;
+
+ scsi_normalize_sense(req->sense, req->sense_len,
+ &sshdr);
+ *last_sense = sshdr.sense_key;
+ ret = -EIO;
+ }
+
+ if (blk_rq_unmap_user(bio))
+ ret = -EFAULT;
+out_put_request:
+ blk_put_request(rq);
+ return ret;
+}
+
/**
* sr_kref_release - Called to free the scsi_cd structure
* @kref: pointer to embedded kref
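
sr_read_cdda_bpc() is reached from the cdrom core when user space issues CDROMREADAUDIO. A minimal user-space sketch that would exercise the new path (device node assumed):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int read_leading_frames(void)
{
	static unsigned char buf[8 * CD_FRAMESIZE_RAW];
	struct cdrom_read_audio ra = {
		.addr.lba	= 0,		/* first frame of the disc */
		.addr_format	= CDROM_LBA,
		.nframes	= 8,		/* 8 raw 2352-byte frames */
		.buf		= buf,
	};
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return -1;
	/* the cdrom core forwards this to the new read_cdda_bpc hook */
	return ioctl(fd, CDROMREADAUDIO, &ra);
}

On error, the sense key captured in *last_sense propagates back through the cdrom layer's retry logic.
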
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d1abc020f3c0..9d04929f03a1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3494,8 +3494,9 @@ out:
/* The ioctl command */
-static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = (void __user *)arg;
int i, cmd_nr, cmd_type, bt;
int retval = 0;
unsigned int blk;
@@ -3815,74 +3816,44 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
goto out;
}
mutex_unlock(&STp->lock);
- switch (cmd_in) {
- case SCSI_IOCTL_STOP_UNIT:
- /* unload */
- retval = scsi_ioctl(STp->device, cmd_in, p);
- if (!retval) {
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- break;
+ switch (cmd_in) {
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ default:
+ break;
+ }
- default:
- if ((cmd_in == SG_IO ||
- cmd_in == SCSI_IOCTL_SEND_COMMAND ||
- cmd_in == CDROM_SEND_PACKET) &&
- !capable(CAP_SYS_RAWIO))
- i = -EPERM;
- else
- i = scsi_cmd_ioctl(STp->device->request_queue,
- NULL, file->f_mode, cmd_in,
- p);
- if (i != -ENOTTY)
- return i;
- break;
+ retval = scsi_ioctl(STp->device, NULL, file->f_mode, cmd_in, p);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
}
- return -ENOTTY;
+ return retval;
out:
mutex_unlock(&STp->lock);
return retval;
}
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- struct scsi_tape *STp = file->private_data;
- int ret;
-
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(STp->device, cmd_in, p);
-}
-
#ifdef CONFIG_COMPAT
static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
- void __user *p = compat_ptr(arg);
- struct scsi_tape *STp = file->private_data;
- int ret;
-
/* argument conversion is handled using put_user_mtpos/put_user_mtget */
switch (cmd_in) {
case MTIOCPOS32:
- return st_ioctl_common(file, MTIOCPOS, p);
+ cmd_in = MTIOCPOS;
+ break;
case MTIOCGET32:
- return st_ioctl_common(file, MTIOCGET, p);
+ cmd_in = MTIOCGET;
+ break;
}
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_compat_ioctl(STp->device, cmd_in, p);
+ return st_ioctl(file, cmd_in, arg);
}
#endif
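
For background on the MTIOCPOS32/MTIOCGET32 renumbering: struct mtget and struct mtpos carry longs, so their 32-bit layouts are smaller and _IOR() encodes a different size, hence a different command number, for compat callers. A sketch of the compat definitions this relies on (layout assumed from st.c's CONFIG_COMPAT block):

struct mtget32 {
	compat_long_t	mt_type;
	compat_long_t	mt_resid;
	compat_long_t	mt_dsreg;
	compat_long_t	mt_gstat;
	compat_long_t	mt_erreg;
	compat_daddr_t	mt_fileno;
	compat_daddr_t	mt_blkno;
};
#define MTIOCGET32	_IOR('m', 2, struct mtget32)

struct mtpos32 {
	compat_long_t	mt_blkno;
};
#define MTIOCPOS32	_IOR('m', 3, struct mtpos32)

Renaming the command before dispatch is enough because the replies are written back through put_user_mtget()/put_user_mtpos(), which honour the caller's ABI, as the retained comment notes.
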
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 491b435273a6..f1ba7f5b52a8 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -540,7 +540,7 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
msg_h = (struct st_msg_header *)req - 1;
if (likely(cmd)) {
msg_h->channel = (u8)cmd->device->channel;
- msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
}
addr = hba->dma_handle + hba->req_head * hba->rq_size;
addr += (hba->ccb[tag].sg_count+4)/11;
@@ -690,7 +690,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->scsi_done = done;
- tag = cmd->request->tag;
+ tag = scsi_cmd_to_rq(cmd)->tag;
if (unlikely(tag >= host->can_queue))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1246,7 +1246,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct st_hba *hba = (struct st_hba *)host->hostdata;
- u16 tag = cmd->request->tag;
+ u16 tag = scsi_cmd_to_rq(cmd)->tag;
void __iomem *base;
u32 data;
int result = SUCCESS;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 37506b3fe5a9..ebbbc1299c62 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -710,7 +710,7 @@ static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
* Cannot return an ID of 0, which is reserved for an unsolicited
* message from Hyper-V.
*/
- return (u64)blk_mq_unique_tag(request->cmd->request) + 1;
+ return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
}
static void handle_sc_creation(struct vmbus_channel *new_sc)
@@ -1211,7 +1211,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
storvsc_log(device, loglevel,
"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
- request->cmd->request->tag,
+ scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
vstor_packet->vm_srb.srb_status,
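
blk_mq_unique_tag() folds the hardware-queue index into the per-queue tag, giving a value unique across all hw queues, which is what lets storvsc use it directly as a VMBus request ID. Sketched from include/linux/blk-mq.h:

static inline u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
	       (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}

The + 1 in storvsc_next_request_id() then keeps the value 0 free for unsolicited Hyper-V messages, per the comment above.
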
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 2e3fbc2fae97..f7f724a3ff1d 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -336,7 +336,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
{
int wanted_len = cmd->SCp.this_residual;
- if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
return 0;
return wanted_len;
@@ -366,8 +366,9 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
}
/* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
+static int sun3scsi_dma_finish(enum dma_data_direction data_dir)
{
+ const bool write_flag = data_dir == DMA_TO_DEVICE;
unsigned short __maybe_unused count;
unsigned short fifo;
int ret = 0;
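
The sun3scsi_dma_finish() prototype change swaps an ad-hoc int flag for the standard enum, so callers pass the command's real transfer direction. For reference, the values it now receives (include/linux/dma-direction.h):

enum dma_data_direction {
	DMA_BIDIRECTIONAL	= 0,
	DMA_TO_DEVICE		= 1,	/* equivalent of the old write_flag */
	DMA_FROM_DEVICE		= 2,
	DMA_NONE		= 3,
};
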
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 16b65fc4405c..6d0b07b9cb31 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -500,8 +500,8 @@ static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->request->timeout) {
- unsigned long tlimit = jiffies + cmd->request->timeout;
+ if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) {
+ unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2d137953e7b4..432df76e6318 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -183,3 +183,19 @@ config SCSI_UFS_CRYPTO
Enabling this makes it possible for the kernel to use the crypto
capabilities of the UFS device (if present) to perform crypto
operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+ bool "Support UFS Host Performance Booster"
+ depends on SCSI_UFSHCD
+ help
+ The UFS HPB feature improves random read performance. It caches the
+ L2P (logical-to-physical) map of the UFS device in host DRAM, and the
+ driver then issues HPB READ commands that carry the physical page
+ number, bypassing the L2P address translation in the device's FTL
+ (flash translation layer).
+
+config SCSI_UFS_FAULT_INJECTION
+ bool "UFS Fault Injection Support"
+ depends on SCSI_UFSHCD && FAULT_INJECTION
+ help
+ Enable fault injection support in the UFS driver. This makes it easier
+ to test the UFS error handler and abort handler.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 06f3a3fe4a44..c407da9b5171 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -8,6 +8,8 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 908ff39c4856..7da8be2f35c4 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -318,11 +318,8 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
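
This hunk, and the matching ones in the other UFS glue drivers below, depend on ufshcd_system_suspend(), ufshcd_system_resume(), ufshcd_runtime_suspend() and ufshcd_runtime_resume() now taking a struct device * directly, so the per-driver dev_get_drvdata() wrappers can go. A rough sketch of what the helper macros expand to when CONFIG_PM_SLEEP / CONFIG_PM are enabled (include/linux/pm.h):

#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,

#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,

With PM disabled both macros expand to nothing, which is why the hand-rolled #ifdef CONFIG_PM blocks can be deleted as well.
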
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index ec4589afbc13..679289e1a78e 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -23,31 +23,6 @@ static int tc_type = TC_G210_INV;
module_param(tc_type, int, 0);
MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
-static int tc_dwc_g210_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-
/*
* struct ufs_hba_dwc_vops - UFS DWC specific variant operations
*/
@@ -143,11 +118,8 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
- .suspend = tc_dwc_g210_pci_suspend,
- .resume = tc_dwc_g210_pci_resume,
- .runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
- .runtime_resume = tc_dwc_g210_pci_runtime_resume,
- .runtime_idle = tc_dwc_g210_pci_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
index a1268e4f44d6..783ec43efa78 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
@@ -84,11 +84,8 @@ static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
static struct platform_driver tc_dwc_g210_pltfm_driver = {
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index cf46d6f86e0e..a14dd8ce56d4 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -260,7 +260,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
struct ufs_hba *hba = ufs->hba;
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
- u32 pclk_rate;
+ unsigned long pclk_rate;
u32 f_min, f_max;
u8 div = 0;
int ret = 0;
@@ -299,7 +299,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
}
if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
- dev_err(hba->dev, "not available pclk range %d\n", pclk_rate);
+ dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
ret = -EINVAL;
goto out;
}
@@ -1287,11 +1287,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
};
static const struct dev_pm_ops exynos_ufs_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index 67505fe32ebf..dadf4fd10dd8 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -184,7 +184,7 @@ struct exynos_ufs {
u32 pclk_div;
u32 pclk_avail_min;
u32 pclk_avail_max;
- u32 mclk_rate;
+ unsigned long mclk_rate;
int avail_ln_rx;
int avail_ln_tx;
int rx_sel_idx;
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/scsi/ufs/ufs-fault-injection.c
new file mode 100644
index 000000000000..7ac7c4e7ff83
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-fault-injection.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+#include <linux/fault-inject.h>
+#include <linux/module.h>
+#include "ufs-fault-injection.h"
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
+static int ufs_fault_set(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops ufs_fault_ops = {
+ .get = ufs_fault_get,
+ .set = ufs_fault_set,
+};
+
+enum { FAULT_INJ_STR_SIZE = 80 };
+
+/*
+ * For more details about fault injection, please refer to
+ * Documentation/fault-injection/fault-injection.rst.
+ */
+static char g_trigger_eh_str[FAULT_INJ_STR_SIZE];
+module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644);
+MODULE_PARM_DESC(trigger_eh,
+ "Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr);
+
+static char g_timeout_str[FAULT_INJ_STR_SIZE];
+module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644);
+MODULE_PARM_DESC(timeout,
+ "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_timeout_attr);
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp)
+{
+ const char *fault_str = kp->arg;
+
+ return sysfs_emit(buffer, "%s\n", fault_str);
+}
+
+static int ufs_fault_set(const char *val, const struct kernel_param *kp)
+{
+ struct fault_attr *attr = NULL;
+
+ if (kp->arg == g_trigger_eh_str)
+ attr = &ufs_trigger_eh_attr;
+ else if (kp->arg == g_timeout_str)
+ attr = &ufs_timeout_attr;
+
+ if (WARN_ON_ONCE(!attr))
+ return -EINVAL;
+
+ if (!setup_fault_attr(attr, (char *)val))
+ return -EINVAL;
+
+ strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE);
+
+ return 0;
+}
+
+bool ufs_trigger_eh(void)
+{
+ return should_fail(&ufs_trigger_eh_attr, 1);
+}
+
+bool ufs_fail_completion(void)
+{
+ return should_fail(&ufs_timeout_attr, 1);
+}
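
Both module parameters feed setup_fault_attr(), which consumes the standard fault-injection quadruple. A usage sketch, with the sysfs path assumed from the ufshcd-core module name:

/*
 * The accepted format is "<interval>,<probability>,<space>,<times>".
 * Writing e.g. "1,10,0,5" to
 *   /sys/module/ufshcd_core/parameters/trigger_eh
 * makes ufs_trigger_eh() return true for roughly 10% of queued
 * commands, at most 5 times; the "timeout" parameter arms
 * ufs_fail_completion() the same way.
 */
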
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/scsi/ufs/ufs-fault-injection.h
new file mode 100644
index 000000000000..6d0cd8e10c87
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-fault-injection.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _UFS_FAULT_INJECTION_H
+#define _UFS_FAULT_INJECTION_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+bool ufs_trigger_eh(void);
+bool ufs_fail_completion(void);
+#else
+static inline bool ufs_trigger_eh(void)
+{
+ return false;
+}
+
+static inline bool ufs_fail_completion(void)
+{
+ return false;
+}
+#endif
+
+#endif /* _UFS_FAULT_INJECTION_H */
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 5b147a48161b..6b706de8354b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -572,11 +572,8 @@ static int ufs_hisi_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ufs_hisi_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index d2c251628a05..80b3545dd17d 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -1140,11 +1140,8 @@ static int ufs_mtk_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9b1d18d7c9bb..9d9770f1db4f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1546,11 +1546,8 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 52bd807f7940..5c405ff7b6ea 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -604,6 +604,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
+UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
@@ -636,6 +638,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_hpb_version.attr,
+ &dev_attr_hpb_control.attr,
&dev_attr_ext_feature_sup.attr,
&dev_attr_wb_presv_us_en.attr,
&dev_attr_wb_type.attr,
@@ -709,6 +713,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
@@ -746,6 +754,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_hpb_region_size.attr,
+ &dev_attr_hpb_number_lu.attr,
+ &dev_attr_hpb_subregion_size.attr,
+ &dev_attr_hpb_max_active_regions.attr,
&dev_attr_wb_max_alloc_units.attr,
&dev_attr_wb_max_wb_luns.attr,
&dev_attr_wb_buff_cap_adj.attr,
@@ -1006,6 +1018,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
+UFS_FLAG(hpb_enable, _HPB_EN);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -1019,6 +1032,7 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_wb_enable.attr,
&dev_attr_wb_flush_en.attr,
&dev_attr_wb_flush_during_h8.attr,
+ &dev_attr_hpb_enable.attr,
NULL,
};
@@ -1065,6 +1079,7 @@ out: \
static DEVICE_ATTR_RO(_name)
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
+UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
@@ -1088,6 +1103,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
+ &dev_attr_max_data_size_hpb_single_cmd.attr,
&dev_attr_current_power_mode.attr,
&dev_attr_active_icc_level.attr,
&dev_attr_ooo_data_enabled.attr,
@@ -1147,6 +1163,7 @@ static DEVICE_ATTR_RO(_pname)
#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
+UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
@@ -1160,10 +1177,13 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
+UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
+UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
-
static struct attribute *ufs_sysfs_unit_descriptor[] = {
+ &dev_attr_lu_enable.attr,
&dev_attr_boot_lun_id.attr,
&dev_attr_lun_write_protect.attr,
&dev_attr_lun_queue_depth.attr,
@@ -1177,6 +1197,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_hpb_lu_max_active_regions.attr,
+ &dev_attr_hpb_pinned_region_start_offset.attr,
+ &dev_attr_hpb_number_pinned_regions.attr,
&dev_attr_wb_buf_alloc_units.attr,
NULL,
};
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index cb80b9670bfe..8c6b38b1b142 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -122,12 +122,14 @@ enum flag_idn {
QUERY_FLAG_IDN_WB_EN = 0x0E,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ QUERY_FLAG_IDN_HPB_EN = 0x12,
};
/* Attribute idn for Query requests */
enum attr_idn {
QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_RESERVED = 0x01,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
QUERY_ATTR_IDN_POWER_MODE = 0x02,
QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
@@ -195,6 +197,9 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
@@ -235,6 +240,8 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
DEVICE_DESC_PARAM_WB_TYPE = 0x54,
@@ -283,6 +290,10 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
@@ -327,8 +338,10 @@ enum {
/* Possible values for dExtendedUFSFeaturesSupport */
enum {
+ UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
+#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -466,6 +479,41 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
+struct ufshpb_active_field {
+ __be16 active_rgn;
+ __be16 active_srgn;
+};
+#define HPB_ACT_FIELD_SIZE 4
+
+/**
+ * struct utp_hpb_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved1: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @desc_type: Descriptor type of sense data
+ * @additional_len: Additional length of sense data
+ * @hpb_op: HPB operation type
+ * @lun: LUN of response UPIU
+ * @active_rgn_cnt: Active region count
+ * @inactive_rgn_cnt: Inactive region count
+ * @hpb_active_field: HPB regions and subregions recommended for activation
+ * @hpb_inactive_field: HPB regions and subregions to be inactivated
+ */
+struct utp_hpb_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved1[4];
+ __be16 sense_data_len;
+ u8 desc_type;
+ u8 additional_len;
+ u8 hpb_op;
+ u8 lun;
+ u8 active_rgn_cnt;
+ u8 inactive_rgn_cnt;
+ struct ufshpb_active_field hpb_active_field[2];
+ __be16 hpb_inactive_field[2];
+};
+#define UTP_HPB_RSP_SIZE 40
+
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -476,6 +524,7 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
+ struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
@@ -544,6 +593,9 @@ struct ufs_dev_info {
u16 wspecversion;
u32 clk_gating_wait_us;
+ /* UFS HPB related flag */
+ bool hpb_enabled;
+
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 07f559ac5883..35ec9ea79869 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -116,4 +116,10 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/*
+ * Some UFS devices require the L2P entry to be byte-swapped before it is
+ * sent to the device in an HPB READ command.
+ */
+#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index e6c334bfb4c2..b3bcc5c882da 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -385,48 +385,6 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
-#ifdef CONFIG_PM_SLEEP
-/**
- * ufshcd_pci_suspend - suspend power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-/**
- * ufshcd_pci_resume - resume power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-#endif /* !CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-static int ufshcd_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-#endif /* !CONFIG_PM */
-
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -510,10 +468,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
- ufshcd_pci_runtime_resume,
- ufshcd_pci_runtime_idle)
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 298e22ef907e..8859c13f4e09 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -170,53 +170,6 @@ out:
return err;
}
-#ifdef CONFIG_PM
-/**
- * ufshcd_pltfrm_suspend - suspend power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);
-
-/**
- * ufshcd_pltfrm_resume - resume power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);
-
-int ufshcd_pltfrm_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);
-
-int ufshcd_pltfrm_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);
-
-int ufshcd_pltfrm_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);
-
-#endif /* CONFIG_PM */
-
void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index 772a8e848098..c33e28ac6ef6 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -33,22 +33,4 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
-#ifdef CONFIG_PM
-
-int ufshcd_pltfrm_suspend(struct device *dev);
-int ufshcd_pltfrm_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_suspend(struct device *dev);
-int ufshcd_pltfrm_runtime_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_idle(struct device *dev);
-
-#else /* !CONFIG_PM */
-
-#define ufshcd_pltfrm_suspend NULL
-#define ufshcd_pltfrm_resume NULL
-#define ufshcd_pltfrm_runtime_suspend NULL
-#define ufshcd_pltfrm_runtime_resume NULL
-#define ufshcd_pltfrm_runtime_idle NULL
-
-#endif /* CONFIG_PM */
-
#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 708b3b62fc4d..3841ab49f556 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -17,15 +17,18 @@
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_transport.h>
+#include "../scsi_transport_api.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
+#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
+#include "ufshpb.h"
#include <asm/unaligned.h>
-#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -128,15 +131,6 @@ enum {
UFSHCD_CAN_QUEUE = 32,
};
-/* UFSHCD states */
-enum {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_ERROR,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED_FATAL,
- UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
-};
-
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -205,7 +199,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
@@ -242,7 +237,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -253,11 +247,6 @@ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
-{
- return tag >= 0 && tag < hba->nutrs;
-}
-
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
@@ -371,26 +360,24 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- u64 lba = -1;
+ u64 lba;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
+ struct request *rq = scsi_cmd_to_rq(cmd);
int transfer_len = -1;
if (!cmd)
return;
- if (!trace_ufshcd_command_enabled()) {
- /* trace UPIU W/O tracing command */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- return;
- }
-
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ if (!trace_ufshcd_command_enabled())
+ return;
+
opcode = cmd->cmnd[0];
- lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+ lba = scsi_get_lba(cmd);
if (opcode == READ_10 || opcode == WRITE_10) {
/*
@@ -404,7 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
/*
* The number of Bytes to be unmapped beginning with the lba.
*/
- transfer_len = blk_rq_bytes(cmd->request);
+ transfer_len = blk_rq_bytes(rq);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -758,16 +745,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
- * @hba: per adapter instance
- * @tag: position of the bit to be cleared
- */
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
-{
- clear_bit(tag, &hba->outstanding_reqs);
-}
-
-/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -2078,7 +2055,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = lrbp->cmd->request;
+ struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2112,27 +2089,22 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
- ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- if (ufshcd_has_utrlcnr(hba)) {
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- } else {
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
/* Make sure that doorbell is committed immediately */
wmb();
}
@@ -2247,15 +2219,15 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
- * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
* @hba: per adapter instance
* @uic_cmd: UIC command
- *
- * Mutex must be held.
*/
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
WARN_ON(hba->active_uic_cmd);
hba->active_uic_cmd = uic_cmd;
@@ -2273,11 +2245,10 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
+ * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Must be called with mutex held.
* Returns 0 only if success.
*/
static int
@@ -2286,6 +2257,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
if (wait_for_completion_timeout(&uic_cmd->done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
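
The lockdep_assert_held() annotations added in these hunks turn the deleted "Mutex must be held" comments into claims the kernel can verify at runtime; roughly, per include/linux/lockdep.h:

#define lockdep_assert_held(l) do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

With CONFIG_LOCKDEP disabled the check compiles away, so production builds pay nothing for the extra documentation.
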
@@ -2315,14 +2288,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
- * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
- * with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+ lockdep_assert_held(hba->host->host_lock);
+
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
@@ -2701,20 +2675,12 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
- struct ufs_hba *hba;
- int tag;
int err = 0;
- hba = shost_priv(host);
-
- tag = cmd->request->tag;
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2748,12 +2714,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out;
}
hba->req_abort_count = 0;
@@ -2766,15 +2726,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- if (hba->pm_op_in_progress)
- set_host_byte(cmd, DID_BAD_TARGET);
- else
- err = SCSI_MLQUEUE_HOST_BUSY;
- ufshcd_release(hba);
- goto out;
- }
-
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
@@ -2784,10 +2735,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
- ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
+ ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
lrbp->req_abort_skip = false;
+ err = ufshpb_prep(hba, lrbp);
+ if (err == -EAGAIN) {
+ lrbp->cmd = NULL;
+ ufshcd_release(hba);
+ goto out;
+ }
+
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
@@ -2796,12 +2754,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_release(hba);
goto out;
}
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
out:
up_read(&hba->clk_scaling_lock);
+
+ if (ufs_trigger_eh())
+ scsi_schedule_eh(hba->host);
+
return err;
}
@@ -2907,8 +2867,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -2930,7 +2888,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* we also need to clear the outstanding_request
* field in hba
*/
- ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
return err;
@@ -2949,11 +2909,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
- struct completion wait;
down_read(&hba->clk_scaling_lock);
@@ -2968,17 +2928,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
/* Set the timeout such that the SCSI error handler is not activated. */
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
-
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -2988,8 +2942,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -3194,7 +3146,7 @@ out_unlock:
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
{
@@ -3419,9 +3371,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
- if (param_offset + param_size > buff_len)
- param_size = buff_len - param_offset;
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+ if (param_offset >= buff_len)
+ ret = -EINVAL;
+ else
+ memcpy(param_read_buf, &desc_buf[param_offset],
+ min_t(u32, param_size, buff_len - param_offset));
}
out:
if (is_kmalloc)
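The rewritten bounds check above can be read as a standalone helper; here is a minimal sketch (copy_desc_param() is a hypothetical name, not part of the driver), assuming buff_len is the descriptor length the device actually returned:

#include <linux/minmax.h>
#include <linux/string.h>

static int copy_desc_param(u8 *dst, const u8 *desc_buf, u32 buff_len,
			   u32 param_offset, u32 param_size)
{
	/* The requested parameter starts beyond the data the device sent. */
	if (param_offset >= buff_len)
		return -EINVAL;

	/* Never copy past the end of what the device provided. */
	memcpy(dst, desc_buf + param_offset,
	       min_t(u32, param_size, buff_len - param_offset));
	return 0;
}

The replaced code computed `buff_len - param_offset` unconditionally, which wraps around when param_offset is larger than buff_len since both operands are unsigned.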
@@ -3965,6 +3919,35 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ lockdep_assert_held(hba->host->host_lock);
+
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+static void ufshcd_schedule_eh(struct ufs_hba *hba)
+{
+ bool schedule_eh = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ schedule_eh = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ scsi_schedule_eh(hba->host);
+}
+
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
@@ -3983,14 +3966,14 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
- struct completion uic_async_done;
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
+ bool schedule_eh = false;
u8 status;
int ret;
bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
- init_completion(&uic_async_done);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4055,10 +4038,14 @@ out:
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
- ufshcd_schedule_eh_work(hba);
+ schedule_eh = true;
}
+
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ ufshcd_schedule_eh(hba);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
@@ -4987,6 +4974,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
return scsi_change_queue_depth(sdev, depth);
}
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_destroy_lu(hba, sdev);
+}
+
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_init_hpb_lu(hba, sdev);
+}
+
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
@@ -4996,6 +5003,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
+ ufshcd_hpb_configure(hba, sdev);
+
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
@@ -5020,15 +5029,37 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
+ unsigned long flags;
hba = shost_priv(sdev->host);
+
+ ufshcd_hpb_destroy(hba, sdev);
+
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
- unsigned long flags;
-
spin_lock_irqsave(hba->host->host_lock, flags);
hba->sdev_ufs_device = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ } else if (hba->sdev_ufs_device) {
+ struct device *supplier = NULL;
+
+ /* Ensure UFS Device WLUN exists and does not disappear */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->sdev_ufs_device) {
+ supplier = &hba->sdev_ufs_device->sdev_gendev;
+ get_device(supplier);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (supplier) {
+ /*
+ * If a LUN fails to probe (e.g. absent BOOT WLUN), the
+ * device will not have been registered but can still
+ * have a device link holding a reference to the device.
+ */
+ device_link_remove(&sdev->sdev_gendev, supplier);
+ put_device(supplier);
+ }
}
}
@@ -5124,6 +5155,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
+
+ if (scsi_status == SAM_STAT_GOOD)
+ ufshpb_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5232,10 +5266,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @completed_reqs: requests to complete
+ * @completed_reqs: bitmask that indicates which requests to complete
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+ unsigned long completed_reqs,
+ bool retry_requests)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5244,8 +5280,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- if (!test_and_clear_bit(index, &hba->outstanding_reqs))
- continue;
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
@@ -5253,7 +5287,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
+ result = retry_requests ? DID_BUS_BUSY << 16 :
+ ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
@@ -5277,17 +5312,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
/**
- * ufshcd_trc_handler - handle transfer requests completion
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @use_utrlcnr: get completed requests from UTRLCNR
+ * @retry_requests: whether or not to ask the SCSI core to retry requests
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ bool retry_requests)
{
- unsigned long completed_reqs = 0;
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5300,27 +5337,21 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- if (use_utrlcnr) {
- u32 utrlcnr;
-
- utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
- if (utrlcnr) {
- ufshcd_writel(hba, utrlcnr,
- REG_UTP_TRANSFER_REQ_LIST_COMPL);
- completed_reqs = utrlcnr;
- }
- } else {
- unsigned long flags;
- u32 tr_doorbell;
+ if (ufs_fail_completion())
+ return IRQ_HANDLED;
- spin_lock_irqsave(hba->host->host_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ __ufshcd_transfer_req_compl(hba, completed_reqs,
+ retry_requests);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
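A worked example of the completed_reqs computation above, with hypothetical register values:

/*
 * outstanding_reqs = 0b1011   (tags 0, 1 and 3 in flight)
 * tr_doorbell      = 0b1000   (hardware still owns tag 3)
 *
 * completed_reqs = ~0b1000 & 0b1011 = 0b0011   -> tags 0 and 1 are done
 */

Reading the doorbell and clearing the completed bits under the same outstanding_lock used at submission keeps this mask consistent with commands being issued concurrently.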
@@ -5799,7 +5830,13 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_trc_handler(hba, false);
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+ ufshcd_tmc_handler(hba);
+}
+
+static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
ufshcd_tmc_handler(hba);
}
@@ -5874,27 +5911,6 @@ out:
return err_handling;
}
-/* host lock must be held before calling this func */
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
-{
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- queue_work(hba->eh_wq, &hba->eh_work);
- }
-}
-
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -6028,11 +6044,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
+ * @host: SCSI host pointer
*/
-static void ufshcd_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct Scsi_Host *host)
{
- struct ufs_hba *hba;
+ struct ufs_hba *hba = shost_priv(host);
unsigned long flags;
bool err_xfer = false;
bool err_tm = false;
@@ -6040,10 +6056,9 @@ static void ufshcd_err_handler(struct work_struct *work)
int tag;
bool needs_reset = false, needs_restore = false;
- hba = container_of(work, struct ufs_hba, eh_work);
-
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->host->host_eh_scheduled = 0;
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6141,8 +6156,7 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- /* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
@@ -6357,7 +6371,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
- ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -6369,6 +6382,10 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
+
+ if (queue_eh_work)
+ ufshcd_schedule_eh(hba);
+
return retval;
}
@@ -6440,7 +6457,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
+ retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
return retval;
}
@@ -6548,9 +6565,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- /* Make sure descriptors are ready before ringing the task doorbell */
- wmb();
-
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
@@ -6663,11 +6677,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
- struct completion wait;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6678,14 +6692,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
@@ -6723,8 +6736,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
/*
@@ -6865,7 +6876,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, pos);
+ __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
}
}
@@ -6981,33 +6992,24 @@ out:
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
+ struct Scsi_Host *host = cmd->device->host;
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
- unsigned int tag;
- int err = 0;
- struct ufshcd_lrb *lrbp;
+ int err = FAILED;
u32 reg;
- host = cmd->device->host;
- hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return SUCCESS */
+ /* If command is already aborted/completed, return FAILED. */
if (!(test_bit(tag, &hba->outstanding_reqs))) {
dev_err(hba->dev,
"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
__func__, tag, hba->outstanding_reqs, reg);
- goto out;
+ goto release;
}
/* Print Transfer Request of aborted task */
@@ -7036,7 +7038,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- goto cleanup;
+ __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ goto release;
}
/*
@@ -7045,40 +7048,39 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the eh_work and bail.
+ * then queue the error handler and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- set_bit(tag, &hba->outstanding_reqs);
+
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
- goto out;
+
+ ufshcd_schedule_eh(hba);
+
+ goto release;
}
/* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip)
- err = -EIO;
- else
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skipping abort\n", __func__);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ goto release;
+ }
- if (!err) {
-cleanup:
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
-out:
- err = SUCCESS;
- } else {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
+ goto release;
}
- /*
- * This ufshcd_release() corresponds to the original scsi cmd that got
- * aborted here (as we won't get any IRQ for it).
- */
+ err = SUCCESS;
+
+release:
+ /* Matches the ufshcd_hold() call at the start of this function. */
ufshcd_release(hba);
return err;
}
@@ -7101,9 +7103,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshpb_reset_host(hba);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -7188,11 +7191,10 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->eh_work);
+ ufshcd_err_handler(hba->host);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -7499,6 +7501,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
+ u8 b_ufs_feature_sup;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -7526,9 +7529,26 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+ if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
+ (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
+ bool hpb_en = false;
+
+ ufshpb_get_dev_info(hba, desc_buf);
+
+ if (!ufshpb_is_legacy(hba))
+ err = ufshcd_query_flag_retry(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_HPB_EN, 0,
+ &hpb_en);
+
+ if (ufshpb_is_legacy(hba) || (!err && hpb_en))
+ dev_info->hpb_enabled = true;
+ }
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -7760,6 +7780,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
+ ufshpb_get_geo_info(hba, desc_buf);
+
out:
kfree(desc_buf);
return err;
@@ -7902,6 +7926,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
+ ufshpb_init(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -7909,8 +7934,61 @@ out:
return ret;
}
+static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
+{
+ if (error != BLK_STS_OK)
+ pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
+ kfree(rq->end_io_data);
+ blk_put_request(rq);
+}
+
static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /*
+ * Some UFS devices clear unit attention condition only if the sense
+ * size used (UFS_SENSE_SIZE in this case) is non-zero.
+ */
+ static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0};
+ struct scsi_request *rq;
+ struct request *req;
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+ /*flags=*/BLK_MQ_REQ_PM);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out_free;
+ }
+
+ ret = blk_rq_map_kern(sdev->request_queue, req,
+ buffer, UFS_SENSE_SIZE, GFP_NOIO);
+ if (ret)
+ goto out_put;
+
+ rq = scsi_req(req);
+ rq->cmd_len = ARRAY_SIZE(cmd);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
+ rq->retries = 3;
+ req->timeout = 1 * HZ;
+ req->rq_flags |= RQF_PM | RQF_QUIET;
+ req->end_io_data = buffer;
+
+ blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true,
+ ufshcd_request_sense_done);
+ return 0;
+
+out_put:
+ blk_put_request(req);
+out_free:
+ kfree(buffer);
+ return ret;
+}
static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
{
@@ -7938,7 +8016,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
if (ret)
goto out_err;
- ret = ufshcd_send_request_sense(hba, sdp);
+ ret = ufshcd_request_sense_async(hba, sdp);
scsi_device_put(sdp);
out_err:
if (ret)
@@ -7967,13 +8045,13 @@ out:
}
/**
- * ufshcd_probe_hba - probe hba to detect device and initialize
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
- * @async: asynchronous execution or not
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
unsigned long flags;
@@ -8005,7 +8083,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
* Initialize UFS device parameters used by driver, these
* parameters are associated with UFS descriptors.
*/
- if (async) {
+ if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
goto out;
@@ -8050,6 +8128,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ ufshpb_reset(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
@@ -8097,6 +8176,10 @@ out:
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
+#ifdef CONFIG_SCSI_UFS_HPB
+ &ufs_sysfs_hpb_stat_group,
+ &ufs_sysfs_hpb_param_group,
+#endif
NULL,
};
@@ -8521,8 +8604,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
- if (hba->eh_wq)
- destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
@@ -8533,35 +8614,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
}
}
-static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
-{
- unsigned char cmd[6] = {REQUEST_SENSE,
- 0,
- 0,
- 0,
- UFS_SENSE_SIZE,
- 0};
- char *buffer;
- int ret;
-
- buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFS_SENSE_SIZE, NULL, NULL,
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
- if (ret)
- pr_err("%s: failed with err %d\n", __func__, ret);
-
- kfree(buffer);
-out:
- return ret;
-}
-
/**
* ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
* power mode
@@ -8739,6 +8791,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
usleep_range(5000, 5100);
}
+#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
int ret = 0;
@@ -8766,6 +8819,7 @@ vcc_disable:
out:
return ret;
}
+#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
@@ -8798,6 +8852,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
+ ufshpb_suspend(hba);
+
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -8921,6 +8977,7 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
+ ufshpb_resume(hba);
}
hba->pm_op_in_progress = false;
return ret;
@@ -8999,6 +9056,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+
+ ufshpb_resume(hba);
goto out;
set_old_link_state:
@@ -9168,6 +9227,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
return ret;
}
+#ifdef CONFIG_PM
/**
* ufshcd_resume - helper function for resume operations
* @hba: per adapter instance
@@ -9205,17 +9265,21 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
+#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
/**
- * ufshcd_system_suspend - system suspend routine
- * @hba: per adapter instance
+ * ufshcd_system_suspend - system suspend callback
+ * @dev: Device associated with the UFS controller.
*
- * Check the description of ufshcd_suspend() function for more details.
+ * Executed before putting the system into a sleep state in which the contents
+ * of main memory are preserved.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_system_suspend(struct ufs_hba *hba)
+int ufshcd_system_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret = 0;
ktime_t start = ktime_get();
@@ -9232,16 +9296,19 @@ out:
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
- * ufshcd_system_resume - system resume routine
- * @hba: per adapter instance
+ * ufshcd_system_resume - system resume callback
+ * @dev: Device associated with the UFS controller.
+ *
+ * Executed after waking the system up from a sleep state in which the contents
+ * of main memory were preserved.
*
* Returns 0 for success and non-zero for failure
*/
-
-int ufshcd_system_resume(struct ufs_hba *hba)
+int ufshcd_system_resume(struct device *dev)
{
- int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start = ktime_get();
+ int ret = 0;
if (pm_runtime_suspended(hba->dev))
goto out;
@@ -9256,17 +9323,20 @@ out:
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM
/**
- * ufshcd_runtime_suspend - runtime suspend routine
- * @hba: per adapter instance
+ * ufshcd_runtime_suspend - runtime suspend callback
+ * @dev: Device associated with the UFS controller.
*
* Check the description of ufshcd_suspend() function for more details.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
+int ufshcd_runtime_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9281,7 +9351,7 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
* ufshcd_runtime_resume - runtime resume routine
- * @hba: per adapter instance
+ * @dev: Device associated with the UFS controller.
*
* This function basically brings controller
* to active state. Following operations are done in this function:
@@ -9289,8 +9359,9 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
*/
-int ufshcd_runtime_resume(struct ufs_hba *hba)
+int ufshcd_runtime_resume(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9302,12 +9373,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba)
return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
-
-int ufshcd_runtime_idle(struct ufs_hba *hba)
-{
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_runtime_idle);
+#endif /* CONFIG_PM */
/**
* ufshcd_shutdown - shutdown routine
@@ -9343,6 +9409,7 @@ void ufshcd_remove(struct ufs_hba *hba)
if (hba->sdev_ufs_device)
ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
+ ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -9381,6 +9448,10 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
+static struct scsi_transport_template ufshcd_transport_template = {
+ .eh_strategy_handler = ufshcd_err_handler,
+};
+
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
@@ -9407,13 +9478,15 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->transportt = &ufshcd_transport_template;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
- *hba_handle = hba;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
-
INIT_LIST_HEAD(&hba->clk_list_head);
+ spin_lock_init(&hba->outstanding_lock);
+
+ *hba_handle = hba;
out_error:
return err;
@@ -9444,7 +9517,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -9498,17 +9570,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
- if (!hba->eh_wq) {
- dev_err(hba->dev, "%s: failed to create eh workqueue\n",
- __func__);
- err = -ENOMEM;
- goto out_disable;
- }
- INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
@@ -9627,6 +9688,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
+ device_enable_async_suspend(dev);
return 0;
free_tmf_queue:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 194755c9ddfe..52ea6f350b18 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -476,6 +476,27 @@ struct ufs_stats {
struct ufs_event_hist event[UFS_EVT_CNT];
};
+/**
+ * enum ufshcd_state - UFS host controller state
+ * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
+ * processing.
+ * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
+ * SCSI commands.
+ * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
+ * SCSI commands may be submitted to the controller.
+ * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
+ * newly submitted SCSI commands with error code DID_BAD_TARGET.
+ * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
+ * failed. Fail all SCSI commands with error code DID_ERROR.
+ */
+enum ufshcd_state {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_ERROR,
+};
+
enum ufshcd_quirks {
/* Interrupt aggregation support is broken */
UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
@@ -641,6 +662,31 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
+#ifdef CONFIG_SCSI_UFS_HPB
+/**
+ * struct ufshpb_dev_info - UFSHPB device related info
+ * @num_lu: the number of user logical units, used to check whether all
+ * LUs have finished initialization
+ * @rgn_size: device reported HPB region size
+ * @srgn_size: device reported HPB sub-region size
+ * @slave_conf_cnt: counter to check whether all LUs have finished initialization
+ * @hpb_disabled: flag to check if HPB is disabled
+ * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
+ * @is_legacy: flag to check HPB 1.0
+ * @control_mode: either host or device
+ */
+struct ufshpb_dev_info {
+ int num_lu;
+ int rgn_size;
+ int srgn_size;
+ atomic_t slave_conf_cnt;
+ bool hpb_disabled;
+ u8 max_hpb_single_cmd;
+ bool is_legacy;
+ u8 control_mode;
+};
+#endif
+
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -674,6 +720,7 @@ struct ufs_hba_monitor {
* @lrb: local reference block
* @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
* @nutrs: Transfer Request Queue depth supported by controller
@@ -683,19 +730,17 @@ struct ufs_hba_monitor {
* @priv: pointer to variant specific private data
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for uic command
+ * @uic_cmd_mutex: mutex for UIC command
* @tmf_tag_set: TMF tag set.
* @tmf_queue: Used to allocate TMF tags.
* @pwr_done: completion for power mode change
- * @ufshcd_state: UFSHCD states
+ * @ufshcd_state: UFSHCD state
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
* @shutting_down: flag to check if shutdown has been invoked
* @host_sem: semaphore used to serialize concurrent contexts
- * @eh_wq: Workqueue that eh_work works on
- * @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
@@ -760,6 +805,7 @@ struct ufs_hba {
struct ufshcd_lrb *lrb;
unsigned long outstanding_tasks;
+ spinlock_t outstanding_lock;
unsigned long outstanding_reqs;
u32 capabilities;
@@ -785,7 +831,7 @@ struct ufs_hba {
struct mutex uic_cmd_mutex;
struct completion *uic_async_done;
- u32 ufshcd_state;
+ enum ufshcd_state ufshcd_state;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask; /* Exception event mask */
@@ -797,8 +843,6 @@ struct ufs_hba {
struct semaphore host_sem;
/* Work Queues */
- struct workqueue_struct *eh_wq;
- struct work_struct eh_work;
struct work_struct eeh_work;
/* HBA Errors */
@@ -851,6 +895,10 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
+#ifdef CONFIG_SCSI_UFS_HPB
+ struct ufshpb_dev_info ufshpb_dev;
+#endif
+
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -893,16 +941,8 @@ static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
-/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
-#ifndef CONFIG_SCSI_UFS_DWC
- if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
- return true;
- else
- return false;
-#else
-return true;
-#endif
+ return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
}
static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
@@ -1009,11 +1049,14 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
-extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
-extern int ufshcd_runtime_resume(struct ufs_hba *hba);
-extern int ufshcd_runtime_idle(struct ufs_hba *hba);
-extern int ufshcd_system_suspend(struct ufs_hba *hba);
-extern int ufshcd_system_resume(struct ufs_hba *hba);
+#ifdef CONFIG_PM
+extern int ufshcd_runtime_suspend(struct device *dev);
+extern int ufshcd_runtime_resume(struct device *dev);
+#endif
+#ifdef CONFIG_PM_SLEEP
+extern int ufshcd_system_suspend(struct device *dev);
+extern int ufshcd_system_resume(struct device *dev);
+#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -1096,6 +1139,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
u8 param_offset,
u8 *param_read_buf,
u8 param_size);
+int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
@@ -1160,11 +1206,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
-{
- return (hba->ufs_version >= ufshci_version(3, 0));
-}
-
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
@@ -1226,18 +1267,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
-static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
- bool is_scsi_cmd)
-{
- if (hba->vops && hba->vops->setup_xfer_req) {
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-}
-
static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
int tag, u8 tm_function)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5affb1fce5ad..de95be5d11d4 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,6 @@ enum {
REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
- REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
new file mode 100644
index 000000000000..02fb51ae8b25
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -0,0 +1,2933 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/async.h>
+
+#include "ufshcd.h"
+#include "ufshpb.h"
+#include "../sd.h"
+
+#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200
+#define THROTTLE_MAP_REQ_DEFAULT 1
+
+/* memory management */
+static struct kmem_cache *ufshpb_mctx_cache;
+static mempool_t *ufshpb_mctx_pool;
+static mempool_t *ufshpb_page_pool;
+/* A cache size of 2MB can cache ppn in the 1GB range. */
+static unsigned int ufshpb_host_map_kbytes = 2048;
+static int tot_active_srgn_pages;
+
+static struct workqueue_struct *ufshpb_wq;
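The 1GB figure in the map-cache comment above follows from the entry geometry; a sketch of the arithmetic, assuming the 8-byte HPB entry size (HPB_ENTRY_SIZE) and the 4KB logical blocks used throughout this driver:

/*
 * 2 MiB / 8 B per L2P entry       = 262144 cached entries
 * 262144 entries * 4 KiB per LBA  = 1 GiB of cached address range
 */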
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx);
+
+bool ufshpb_is_allowed(struct ufs_hba *hba)
+{
+ return !(hba->ufshpb_dev.hpb_disabled);
+}
+
+/* HPB version 1.0 is considered the legacy version. */
+bool ufshpb_is_legacy(struct ufs_hba *hba)
+{
+ return hba->ufshpb_dev.is_legacy;
+}
+
+static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
+{
+ return sdev->hostdata;
+}
+
+static int ufshpb_get_state(struct ufshpb_lu *hpb)
+{
+ return atomic_read(&hpb->hpb_state);
+}
+
+static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
+{
+ atomic_set(&hpb->hpb_state, state);
+}
+
+static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ return rgn->rgn_state != HPB_RGN_INACTIVE &&
+ srgn->srgn_state == HPB_SRGN_VALID;
+}
+
+static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
+{
+ return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
+}
+
+static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
+{
+ return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
+ op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
+}
+
+static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
+{
+ return transfer_len <= hpb->pre_req_max_tr_len;
+}
+
+/*
+ * By default, the WRITE_BUFFER command supports transfer lengths from 36KB
+ * (len=9) to 1MB (len=256). The transfer_len range can be changed through
+ * sysfs.
+ */
+static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
+{
+ return len > hpb->pre_req_min_tr_len &&
+ len <= hpb->pre_req_max_tr_len;
+}
+
+static bool ufshpb_is_general_lun(int lun)
+{
+ return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
+{
+ if (hpb->lu_pinned_end != PINNED_NOT_SET &&
+ rgn_idx >= hpb->lu_pinned_start &&
+ rgn_idx <= hpb->lu_pinned_end)
+ return true;
+
+ return false;
+}
+
+static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
+{
+ bool ret = false;
+ unsigned long flags;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ return;
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
+ ret = true;
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ if (ret)
+ queue_work(ufshpb_wq, &hpb->map_work);
+}
+
+static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp,
+ struct utp_hpb_rsp *rsp_field)
+{
+ /* Check HPB_UPDATE_ALERT */
+ if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
+ UPIU_HEADER_DWORD(0, 2, 0, 0)))
+ return false;
+
+ if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
+ rsp_field->desc_type != DEV_DES_TYPE ||
+ rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
+ rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
+ rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
+ rsp_field->hpb_op == HPB_RSP_NONE ||
+ (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
+ !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
+ return false;
+
+ if (!ufshpb_is_general_lun(rsp_field->lun)) {
+ dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
+ lrbp->lun);
+ return false;
+ }
+
+ return true;
+}
+
+static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
+ int srgn_offset, int cnt, bool set_dirty)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn, *prev_srgn = NULL;
+ int set_bit_len;
+ int bitmap_len;
+ unsigned long flags;
+
+next_srgn:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (likely(!srgn->is_last))
+ bitmap_len = hpb->entries_per_srgn;
+ else
+ bitmap_len = hpb->last_srgn_entries;
+
+ if ((srgn_offset + cnt) > bitmap_len)
+ set_bit_len = bitmap_len - srgn_offset;
+ else
+ set_bit_len = cnt;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ if (set_dirty) {
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
+ set_bit_len);
+ } else if (hpb->is_hcm) {
+ /* rewind the read timer for lru regions */
+ rgn->read_timeout = ktime_add_ms(ktime_get(),
+ rgn->hpb->params.read_timeout_ms);
+ rgn->read_timeout_expiries =
+ rgn->hpb->params.read_timeout_expiries;
+ }
+ }
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ if (hpb->is_hcm && prev_srgn != srgn) {
+ bool activate = false;
+
+ spin_lock(&rgn->rgn_lock);
+ if (set_dirty) {
+ rgn->reads -= srgn->reads;
+ srgn->reads = 0;
+ set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+ } else {
+ srgn->reads++;
+ rgn->reads++;
+ if (srgn->reads == hpb->params.activation_thld)
+ activate = true;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
+ if (activate ||
+ test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "activate region %d-%d\n", rgn_idx, srgn_idx);
+ }
+
+ prev_srgn = srgn;
+ }
+
+ srgn_offset = 0;
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+
+ cnt -= set_bit_len;
+ if (cnt > 0)
+ goto next_srgn;
+}
+
+static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx, int srgn_offset, int cnt)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int bitmap_len;
+ int bit_len;
+
+next_srgn:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (likely(!srgn->is_last))
+ bitmap_len = hpb->entries_per_srgn;
+ else
+ bitmap_len = hpb->last_srgn_entries;
+
+ if (!ufshpb_is_valid_srgn(rgn, srgn))
+ return true;
+
+ /*
+ * If the region state is active, mctx must be allocated.
+ * If it is not, the region has either been evicted or its
+ * mctx allocation failed.
+ */
+ if (unlikely(!srgn->mctx)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ return true;
+ }
+
+ if ((srgn_offset + cnt) > bitmap_len)
+ bit_len = bitmap_len - srgn_offset;
+ else
+ bit_len = cnt;
+
+ if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
+ srgn_offset) < bit_len + srgn_offset)
+ return true;
+
+ srgn_offset = 0;
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+
+ cnt -= bit_len;
+ if (cnt > 0)
+ goto next_srgn;
+
+ return false;
+}
+
+static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
+{
+ return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+}
+
+static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
+ struct ufshpb_map_ctx *mctx, int pos,
+ int len, __be64 *ppn_buf)
+{
+ struct page *page;
+ int index, offset;
+ int copied;
+
+ index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
+ offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
+
+ if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
+ copied = len;
+ else
+ copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
+
+ page = mctx->m_page[index];
+ if (unlikely(!page)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "error. cannot find page in mctx\n");
+ return -ENOMEM;
+ }
+
+ memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
+ copied * HPB_ENTRY_SIZE);
+
+ return copied;
+}
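A worked example of the page index/offset split in ufshpb_fill_ppn_from_page() above, assuming 4KB pages so that PAGE_SIZE / HPB_ENTRY_SIZE = 512 entries per page (the values are illustrative):

/*
 * pos = 1000, len = 100:
 *   index  = 1000 / 512 = 1     (second m_page)
 *   offset = 1000 % 512 = 488
 *   copied = 512 - 488  = 24    (the request crosses a page boundary,
 *                                so only the 24 entries left in this
 *                                page are copied; callers loop and
 *                                advance until len is consumed)
 */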
+
+static void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+ int *srgn_idx, int *offset)
+{
+ int rgn_offset;
+
+ *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+ rgn_offset = lpn & hpb->entries_per_rgn_mask;
+ *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+ *offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
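A worked example of the decomposition in ufshpb_get_pos_from_lpn() above, under a hypothetical geometry with entries_per_rgn_shift = 12 and entries_per_srgn_shift = 9 (the masks are the matching low-bit masks, 0xfff and 0x1ff):

/*
 * lpn = 0x12345:
 *   rgn_idx    = 0x12345 >> 12   = 0x12    (region 18)
 *   rgn_offset = 0x12345 & 0xfff = 0x345
 *   srgn_idx   = 0x345 >> 9      = 1       (second subregion)
 *   offset     = 0x345 & 0x1ff   = 0x145   (entry 325 within it)
 */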
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
+ struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
+ u8 transfer_len, int read_id)
+{
+ unsigned char *cdb = lrbp->cmd->cmnd;
+ __be64 ppn_tmp = ppn;
+
+ cdb[0] = UFSHPB_READ;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
+ ppn_tmp = swab64(ppn);
+
+ /* ppn value is stored as big-endian in the host memory */
+ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
+ cdb[14] = transfer_len;
+ cdb[15] = read_id;
+
+ lrbp->cmd->cmd_len = UFS_CDB_SIZE;
+}
+
+static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
+ unsigned long lpn, unsigned int len,
+ int read_id)
+{
+ cdb[0] = UFSHPB_WRITE_BUFFER;
+ cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
+
+ put_unaligned_be32(lpn, &cdb[2]);
+ cdb[6] = read_id;
+ put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
+
+ cdb[9] = 0x00; /* Control = 0x00 */
+}
+
+static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req;
+
+ if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+ "pre_req throttle. inflight %d throttle %d",
+ hpb->num_inflight_pre_req, hpb->throttle_pre_req);
+ return NULL;
+ }
+
+ pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
+ struct ufshpb_req, list_req);
+ if (!pre_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
+ return NULL;
+ }
+
+ list_del_init(&pre_req->list_req);
+ hpb->num_inflight_pre_req++;
+
+ return pre_req;
+}
+
+static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *pre_req)
+{
+ pre_req->req = NULL;
+ bio_reset(pre_req->bio);
+ list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+ hpb->num_inflight_pre_req--;
+}
+
+static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
+ struct ufshpb_lu *hpb = pre_req->hpb;
+ unsigned long flags;
+
+ if (error) {
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_sense_hdr sshdr;
+
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
+ scsi_command_normalize_sense(cmd, &sshdr);
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "code %x sense_key %x asc %x ascq %x",
+ sshdr.response_code,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "byte4 %x byte5 %x byte6 %x additional_len %x",
+ sshdr.byte4, sshdr.byte5,
+ sshdr.byte6, sshdr.additional_length);
+ }
+
+ blk_mq_free_request(req);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_put_pre_req(pre_req->hpb, pre_req);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
+{
+ struct ufshpb_lu *hpb = pre_req->hpb;
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ __be64 *addr;
+ int offset = 0;
+ int copied;
+ unsigned long lpn = pre_req->wb.lpn;
+ int rgn_idx, srgn_idx, srgn_offset;
+ unsigned long flags;
+
+ addr = page_address(page);
+ ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+next_offset:
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ if (!ufshpb_is_valid_srgn(rgn, srgn))
+ goto mctx_error;
+
+ if (!srgn->mctx)
+ goto mctx_error;
+
+ copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
+ pre_req->wb.len - offset,
+ &addr[offset]);
+
+ if (copied < 0)
+ goto mctx_error;
+
+ offset += copied;
+ srgn_offset += copied;
+
+ if (srgn_offset == hpb->entries_per_srgn) {
+ srgn_offset = 0;
+
+ if (++srgn_idx == hpb->srgns_per_rgn) {
+ srgn_idx = 0;
+ rgn_idx++;
+ }
+ }
+
+ if (offset < pre_req->wb.len)
+ goto next_offset;
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return 0;
+mctx_error:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return -ENOMEM;
+}
+
+static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
+ struct request_queue *q,
+ struct ufshpb_req *pre_req)
+{
+ struct page *page = pre_req->wb.m_page;
+ struct bio *bio = pre_req->bio;
+ int entries_bytes, ret;
+
+ if (!page)
+ return -ENOMEM;
+
+ if (ufshpb_prep_entry(pre_req, page))
+ return -ENOMEM;
+
+ entries_bytes = pre_req->wb.len * sizeof(__be64);
+
+ ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
+ if (ret != entries_bytes) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "bio_add_pc_page fail: %d", ret);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
+{
+ if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
+ hpb->cur_read_id = 1;
+ return hpb->cur_read_id;
+}
+
+static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+ struct ufshpb_req *pre_req, int read_id)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = sdev->request_queue;
+ struct request *req;
+ struct scsi_request *rq;
+ struct bio *bio = pre_req->bio;
+
+ pre_req->hpb = hpb;
+ pre_req->wb.lpn = sectors_to_logical(cmd->device,
+ blk_rq_pos(scsi_cmd_to_rq(cmd)));
+ pre_req->wb.len = sectors_to_logical(cmd->device,
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+ if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
+ return -ENOMEM;
+
+ req = pre_req->req;
+
+ /* 1. request setup */
+ blk_rq_append_bio(req, bio);
+ req->rq_disk = NULL;
+ req->end_io_data = (void *)pre_req;
+ req->end_io = ufshpb_pre_req_compl_fn;
+
+ /* 2. scsi_request setup */
+ rq = scsi_req(req);
+ rq->retries = 1;
+
+ ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
+ read_id);
+ rq->cmd_len = scsi_command_size(rq->cmd);
+
+ if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
+ return -EAGAIN;
+
+ hpb->stats.pre_req_cnt++;
+
+ return 0;
+}
+
+static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+ int *read_id)
+{
+ struct ufshpb_req *pre_req;
+ struct request *req = NULL;
+ unsigned long flags;
+ int _read_id;
+ int ret = 0;
+
+ req = blk_get_request(cmd->device->request_queue,
+ REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return -EAGAIN;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ pre_req = ufshpb_get_pre_req(hpb);
+ if (!pre_req) {
+ ret = -EAGAIN;
+ goto unlock_out;
+ }
+ _read_id = ufshpb_get_read_id(hpb);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ pre_req->req = req;
+
+ ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
+ if (ret)
+ goto free_pre_req;
+
+ *read_id = _read_id;
+
+ return ret;
+free_pre_req:
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_put_pre_req(hpb, pre_req);
+unlock_out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ blk_put_request(req);
+ return ret;
+}
+
+/*
+ * This function sets up an HPB READ command using the host-side L2P map data.
+ */
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshpb_lu *hpb;
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ u32 lpn;
+ __be64 ppn;
+ unsigned long flags;
+ int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+ int read_id = 0;
+ int err = 0;
+
+ hpb = ufshpb_get_hpb_data(cmd->device);
+ if (!hpb)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) == HPB_INIT)
+ return -ENODEV;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT", __func__);
+ return -ENODEV;
+ }
+
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
+ (!ufshpb_is_write_or_discard(cmd) &&
+ !ufshpb_is_read_cmd(cmd)))
+ return 0;
+
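+	/* transfer_len and lpn are in logical-block (HPB entry) units, not sectors */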
+ transfer_len = sectors_to_logical(cmd->device,
+ blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+ if (unlikely(!transfer_len))
+ return 0;
+
+ lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
+ ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+	/* If command type is WRITE or DISCARD, set bitmap as dirty */
+ if (ufshpb_is_write_or_discard(cmd)) {
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, true);
+ return 0;
+ }
+
+ if (!ufshpb_is_supported_chunk(hpb, transfer_len))
+ return 0;
+
+ WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
+
+ if (hpb->is_hcm) {
+ /*
+ * in host control mode, reads are the main source for
+ * activation trials.
+ */
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, false);
+
+ /* keep those counters normalized */
+ if (rgn->reads > hpb->entries_per_srgn)
+ schedule_work(&hpb->ufshpb_normalization_work);
+ }
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len)) {
+ hpb->stats.miss_cnt++;
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return 0;
+ }
+
+ err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ if (unlikely(err < 0)) {
+		/*
+		 * The region state is active, but the ppn table was not
+		 * allocated. The ppn table must always be allocated while
+		 * the region is in the active state.
+		 */
+ dev_err(hba->dev, "get ppn failed. err %d\n", err);
+ return err;
+ }
+ if (!ufshpb_is_legacy(hba) &&
+ ufshpb_is_required_wb(hpb, transfer_len)) {
+ err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
+ if (err) {
+ unsigned long timeout;
+
+ timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
+ hpb->params.requeue_timeout_ms);
+
+ if (time_before(jiffies, timeout))
+ return -EAGAIN;
+
+ hpb->stats.miss_cnt++;
+ return 0;
+ }
+ }
+
+ ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
+ read_id);
+
+ hpb->stats.hit_cnt++;
+ return 0;
+}
+
+static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
+ int rgn_idx, enum req_opf dir,
+ bool atomic)
+{
+ struct ufshpb_req *rq;
+ struct request *req;
+ int retries = HPB_MAP_REQ_RETRIES;
+
+ rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
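+	/*
+	 * With BLK_MQ_REQ_NOWAIT, blk_get_request() returns -EWOULDBLOCK when
+	 * no tag is available; retry a few times unless the caller is atomic.
+	 */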
+retry:
+ req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+ BLK_MQ_REQ_NOWAIT);
+
+ if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
+ usleep_range(3000, 3100);
+ goto retry;
+ }
+
+ if (IS_ERR(req))
+ goto free_rq;
+
+ rq->hpb = hpb;
+ rq->req = req;
+ rq->rb.rgn_idx = rgn_idx;
+
+ return rq;
+
+free_rq:
+ kmem_cache_free(hpb->map_req_cache, rq);
+ return NULL;
+}
+
+static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
+{
+ blk_put_request(rq->req);
+ kmem_cache_free(hpb->map_req_cache, rq);
+}
+
+static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_req *map_req;
+ struct bio *bio;
+ unsigned long flags;
+
+ if (hpb->is_hcm &&
+ hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
+ dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+ "map_req throttle. inflight %d throttle %d",
+ hpb->num_inflight_map_req,
+ hpb->params.inflight_map_req);
+ return NULL;
+ }
+
+ map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
+ if (!map_req)
+ return NULL;
+
+ bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+ if (!bio) {
+ ufshpb_put_req(hpb, map_req);
+ return NULL;
+ }
+
+ map_req->bio = bio;
+
+ map_req->rb.srgn_idx = srgn->srgn_idx;
+ map_req->rb.mctx = srgn->mctx;
+
+ spin_lock_irqsave(&hpb->param_lock, flags);
+ hpb->num_inflight_map_req++;
+ spin_unlock_irqrestore(&hpb->param_lock, flags);
+
+ return map_req;
+}
+
+static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *map_req)
+{
+ unsigned long flags;
+
+ bio_put(map_req->bio);
+ ufshpb_put_req(hpb, map_req);
+
+ spin_lock_irqsave(&hpb->param_lock, flags);
+ hpb->num_inflight_map_req--;
+ spin_unlock_irqrestore(&hpb->param_lock, flags);
+}
+
+static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_region *rgn;
+ u32 num_entries = hpb->entries_per_srgn;
+
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ return -1;
+ }
+
+ if (unlikely(srgn->is_last))
+ num_entries = hpb->last_srgn_entries;
+
+ bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+ clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+
+ return 0;
+}
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+ srgn = rgn->srgn_tbl + srgn_idx;
+
+ list_del_init(&rgn->list_inact_rgn);
+
+ if (list_empty(&srgn->list_act_srgn))
+ list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+
+ hpb->stats.rb_active_cnt++;
+}
+
+static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ list_del_init(&srgn->list_act_srgn);
+
+ if (list_empty(&rgn->list_inact_rgn))
+ list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
+
+ hpb->stats.rb_inactive_cnt++;
+}
+
+static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_region *rgn;
+
+	/*
+	 * If the subregion has no mctx after the I/O for
+	 * HPB_READ_BUFFER completes, the region to which the
+	 * subregion belongs was evicted. A region must not be
+	 * evicted while its I/O is still in progress.
+	 */
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "no mctx in region %d subregion %d.\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ return;
+ }
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+
+ if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "region %d subregion %d evicted\n",
+ srgn->rgn_idx, srgn->srgn_idx);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ return;
+ }
+ srgn->srgn_state = HPB_SRGN_VALID;
+}
+
+static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
+
+ ufshpb_put_req(umap_req->hpb, umap_req);
+}
+
+static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+{
+ struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
+ struct ufshpb_lu *hpb = map_req->hpb;
+ struct ufshpb_subregion *srgn;
+ unsigned long flags;
+
+ srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
+ map_req->rb.srgn_idx;
+
+ ufshpb_clear_dirty_bitmap(hpb, srgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ ufshpb_activate_subregion(hpb, srgn);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ ufshpb_put_map_req(map_req->hpb, map_req);
+}
+
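+/*
+ * HPB WRITE BUFFER with an inactivate buffer ID asks the device to discard
+ * its HPB state for a single region (when rgn is set) or for all regions.
+ */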
+static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
+{
+ cdb[0] = UFSHPB_WRITE_BUFFER;
+ cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
+ UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
+ if (rgn)
+ put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
+ cdb[9] = 0x00;
+}
+
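+/*
+ * HPB READ BUFFER CDB layout: opcode, buffer ID, region index (2 bytes),
+ * subregion index (2 bytes) and allocation length (3 bytes).
+ */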
+static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
+ int srgn_idx, int srgn_mem_size)
+{
+ cdb[0] = UFSHPB_READ_BUFFER;
+ cdb[1] = UFSHPB_READ_BUFFER_ID;
+
+ put_unaligned_be16(rgn_idx, &cdb[2]);
+ put_unaligned_be16(srgn_idx, &cdb[4]);
+ put_unaligned_be24(srgn_mem_size, &cdb[6]);
+
+ cdb[9] = 0x00;
+}
+
+static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *umap_req,
+ struct ufshpb_region *rgn)
+{
+ struct request *req;
+ struct scsi_request *rq;
+
+ req = umap_req->req;
+ req->timeout = 0;
+ req->end_io_data = (void *)umap_req;
+ rq = scsi_req(req);
+ ufshpb_set_unmap_cmd(rq->cmd, rgn);
+ rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
+
+ blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
+
+ hpb->stats.umap_req_cnt++;
+}
+
+static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_req *map_req, bool last)
+{
+ struct request_queue *q;
+ struct request *req;
+ struct scsi_request *rq;
+ int mem_size = hpb->srgn_mem_size;
+ int ret = 0;
+ int i;
+
+ q = hpb->sdev_ufs_lu->request_queue;
+ for (i = 0; i < hpb->pages_per_srgn; i++) {
+ ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
+ PAGE_SIZE, 0);
+ if (ret != PAGE_SIZE) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "bio_add_pc_page fail %d - %d\n",
+ map_req->rb.rgn_idx, map_req->rb.srgn_idx);
+ return ret;
+ }
+ }
+
+ req = map_req->req;
+
+ blk_rq_append_bio(req, map_req->bio);
+
+ req->end_io_data = map_req;
+
+ rq = scsi_req(req);
+
+ if (unlikely(last))
+ mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
+
+ ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
+ map_req->rb.srgn_idx, mem_size);
+ rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
+
+ blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
+
+ hpb->stats.map_req_cnt++;
+ return 0;
+}
+
+static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
+ bool last)
+{
+ struct ufshpb_map_ctx *mctx;
+ u32 num_entries = hpb->entries_per_srgn;
+ int i, j;
+
+ mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
+ if (!mctx)
+ return NULL;
+
+ mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
+ if (!mctx->m_page)
+ goto release_mctx;
+
+ if (unlikely(last))
+ num_entries = hpb->last_srgn_entries;
+
+ mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!mctx->ppn_dirty)
+ goto release_m_page;
+
+ for (i = 0; i < hpb->pages_per_srgn; i++) {
+ mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
+ if (!mctx->m_page[i]) {
+ for (j = 0; j < i; j++)
+ mempool_free(mctx->m_page[j], ufshpb_page_pool);
+ goto release_ppn_dirty;
+ }
+ clear_page(page_address(mctx->m_page[i]));
+ }
+
+ return mctx;
+
+release_ppn_dirty:
+ bitmap_free(mctx->ppn_dirty);
+release_m_page:
+ kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+release_mctx:
+ mempool_free(mctx, ufshpb_mctx_pool);
+ return NULL;
+}
+
+static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
+ struct ufshpb_map_ctx *mctx)
+{
+ int i;
+
+ for (i = 0; i < hpb->pages_per_srgn; i++)
+ mempool_free(mctx->m_page[i], ufshpb_page_pool);
+
+ bitmap_free(mctx->ppn_dirty);
+ kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+ mempool_free(mctx, ufshpb_mctx_pool);
+}
+
+static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (srgn->srgn_state == HPB_SRGN_ISSUED)
+ return -EPERM;
+
+ return 0;
+}
+
+static void ufshpb_read_to_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_read_to_work.work);
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn, *next_rgn;
+ unsigned long flags;
+ unsigned int poll;
+ LIST_HEAD(expired_list);
+
+ if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
+ return;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
+ list_lru_rgn) {
+ bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
+
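+		/*
+		 * A dirty region, or one that has exhausted its read
+		 * timeout expiries, is queued for inactivation; otherwise
+		 * its timeout is simply re-armed.
+		 */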
+ if (timedout) {
+ rgn->read_timeout_expiries--;
+ if (is_rgn_dirty(rgn) ||
+ rgn->read_timeout_expiries == 0)
+ list_add(&rgn->list_expired_rgn, &expired_list);
+ else
+ rgn->read_timeout = ktime_add_ms(ktime_get(),
+ hpb->params.read_timeout_ms);
+ }
+ }
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &expired_list,
+ list_expired_rgn) {
+ list_del_init(&rgn->list_expired_rgn);
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ }
+
+ ufshpb_kick_map_work(hpb);
+
+ clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
+
+ poll = hpb->params.timeout_polling_interval_ms;
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+}
+
+static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ rgn->rgn_state = HPB_RGN_ACTIVE;
+ list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+ atomic_inc(&lru_info->active_cnt);
+ if (rgn->hpb->is_hcm) {
+ rgn->read_timeout =
+ ktime_add_ms(ktime_get(),
+ rgn->hpb->params.read_timeout_ms);
+ rgn->read_timeout_expiries =
+ rgn->hpb->params.read_timeout_expiries;
+ }
+}
+
+static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+}
+
+static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+{
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn, *victim_rgn = NULL;
+
+ list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
+ if (ufshpb_check_srgns_issue_state(hpb, rgn))
+ continue;
+
+		/*
+		 * In host control mode, only evict a region once its read
+		 * count has dropped below the eviction exit threshold.
+		 */
+ if (hpb->is_hcm &&
+ rgn->reads > hpb->params.eviction_thld_exit)
+ continue;
+
+ victim_rgn = rgn;
+ break;
+ }
+
+ return victim_rgn;
+}
+
+static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
+ struct ufshpb_region *rgn)
+{
+ list_del_init(&rgn->list_lru_rgn);
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+ atomic_dec(&lru_info->active_cnt);
+}
+
+static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
+ struct ufshpb_subregion *srgn)
+{
+ if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ srgn->mctx = NULL;
+ }
+}
+
+static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ bool atomic)
+{
+ struct ufshpb_req *umap_req;
+ int rgn_idx = rgn ? rgn->rgn_idx : 0;
+
+ umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
+ if (!umap_req)
+ return -ENOMEM;
+
+ ufshpb_execute_umap_req(hpb, umap_req, rgn);
+
+ return 0;
+}
+
+static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ return ufshpb_issue_umap_req(hpb, rgn, true);
+}
+
+static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
+{
+ return ufshpb_issue_umap_req(hpb, NULL, false);
+}
+
+static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct victim_select_info *lru_info;
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ lru_info = &hpb->lru_info;
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
+
+ ufshpb_cleanup_lru_info(lru_info, rgn);
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ ufshpb_purge_active_subregion(hpb, srgn);
+}
+
+static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (rgn->rgn_state == HPB_RGN_PINNED) {
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "pinned region cannot drop-out. region %d\n",
+ rgn->rgn_idx);
+ goto out;
+ }
+
+ if (!list_empty(&rgn->list_lru_rgn)) {
+ if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
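+		/*
+		 * In host control mode the device must be told about the
+		 * eviction. Drop the state lock while issuing the unmap
+		 * request, since request allocation may sleep.
+		 */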
+ if (hpb->is_hcm) {
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ ret = ufshpb_issue_umap_single_req(hpb, rgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (ret)
+ goto out;
+ }
+
+ __ufshpb_evict_region(hpb, rgn);
+ }
+out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return ret;
+}
+
+static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ struct ufshpb_req *map_req;
+ unsigned long flags;
+ int ret;
+ int err = -EAGAIN;
+ bool alloc_required = false;
+ enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT\n", __func__);
+ goto unlock_out;
+ }
+
+ if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
+ (srgn->srgn_state == HPB_SRGN_INVALID)) {
+ err = 0;
+ goto unlock_out;
+ }
+
+ if (srgn->srgn_state == HPB_SRGN_UNUSED)
+ alloc_required = true;
+
+	/*
+	 * If the subregion is already in the ISSUED state, a device-side
+	 * event (e.g. GC or wear-leveling) occurred and another HPB
+	 * response requesting a map load was received. In that case, once
+	 * the outstanding HPB_READ_BUFFER finishes, the next
+	 * HPB_READ_BUFFER is issued again to obtain the latest map data.
+	 */
+ if (srgn->srgn_state == HPB_SRGN_ISSUED)
+ goto unlock_out;
+
+ srgn->srgn_state = HPB_SRGN_ISSUED;
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ if (alloc_required) {
+ srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+ if (!srgn->mctx) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "get map_ctx failed. region %d - %d\n",
+ rgn->rgn_idx, srgn->srgn_idx);
+ state = HPB_SRGN_UNUSED;
+ goto change_srgn_state;
+ }
+ }
+
+ map_req = ufshpb_get_map_req(hpb, srgn);
+ if (!map_req)
+ goto change_srgn_state;
+
+ ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
+ if (ret) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: issue map_req failed: %d, region %d - %d\n",
+ __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
+ goto free_map_req;
+ }
+ return 0;
+
+free_map_req:
+ ufshpb_put_map_req(hpb, map_req);
+change_srgn_state:
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ srgn->srgn_state = state;
+unlock_out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return err;
+}
+
+static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+ struct ufshpb_region *victim_rgn = NULL;
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ /*
+ * If region belongs to lru_list, just move the region
+ * to the front of lru list because the state of the region
+ * is already active-state.
+ */
+ if (!list_empty(&rgn->list_lru_rgn)) {
+ ufshpb_hit_lru_info(lru_info, rgn);
+ goto out;
+ }
+
+ if (rgn->rgn_state == HPB_RGN_INACTIVE) {
+ if (atomic_read(&lru_info->active_cnt) ==
+ lru_info->max_lru_active_cnt) {
+			/*
+			 * If the maximum number of active regions is exceeded,
+			 * evict the least recently used region. This can happen
+			 * when the device reports eviction information late.
+			 * Evicting the LRU region is safe, because the device
+			 * can detect the eviction once the host stops issuing
+			 * HPB_READ for that region.
+			 *
+			 * In host control mode, additionally verify that the
+			 * entering region has accumulated enough reads.
+			 */
+ if (hpb->is_hcm &&
+ rgn->reads < hpb->params.eviction_thld_enter) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ victim_rgn = ufshpb_victim_lru_info(hpb);
+ if (!victim_rgn) {
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "cannot get victim region %s\n",
+ hpb->is_hcm ? "" : "error");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "LRU full (%d), choose victim %d\n",
+ atomic_read(&lru_info->active_cnt),
+ victim_rgn->rgn_idx);
+
+ if (hpb->is_hcm) {
+ spin_unlock_irqrestore(&hpb->rgn_state_lock,
+ flags);
+ ret = ufshpb_issue_umap_single_req(hpb,
+ victim_rgn);
+ spin_lock_irqsave(&hpb->rgn_state_lock,
+ flags);
+ if (ret)
+ goto out;
+ }
+
+ __ufshpb_evict_region(hpb, victim_rgn);
+ }
+
+		/*
+		 * When a region is added to the lru_info list, all of its
+		 * subregions are guaranteed to have been assigned an mctx.
+		 * On failure, mctx allocation is retried without adding the
+		 * region to the lru_info list.
+		 */
+ ufshpb_add_lru_info(lru_info, rgn);
+ }
+out:
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ return ret;
+}
+
+static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
+ struct utp_hpb_rsp *rsp_field)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ int i, rgn_i, srgn_i;
+
+ BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
+	/*
+	 * If the active region and the inactive region are the same,
+	 * we will inactivate this region.
+	 * The device can detect that the region was inactivated and
+	 * will respond with the proper active region information.
+	 */
+ for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
+ rgn_i =
+ be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
+ srgn_i =
+ be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
+
+ rgn = hpb->rgn_tbl + rgn_i;
+ if (hpb->is_hcm &&
+ (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
+			/*
+			 * In host control mode, subregion activation
+			 * recommendations are only honored for active regions.
+			 * Also, ignore recommendations for dirty regions - the
+			 * host makes its own decisions concerning those.
+			 */
+ continue;
+ }
+
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
+
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_active_info(hpb, rgn_i, srgn_i);
+ spin_unlock(&hpb->rsp_list_lock);
+
+ srgn = rgn->srgn_tbl + srgn_i;
+
+		/* Invalidate the subregion to block HPB_READ until the new map is loaded */
+ spin_lock(&hpb->rgn_state_lock);
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ spin_unlock(&hpb->rgn_state_lock);
+ }
+
+ if (hpb->is_hcm) {
+ /*
+ * in host control mode the device is not allowed to inactivate
+ * regions
+ */
+ goto out;
+ }
+
+ for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
+ rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "inactivate(%d) region %d\n", i, rgn_i);
+
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_inactive_info(hpb, rgn_i);
+ spin_unlock(&hpb->rsp_list_lock);
+
+ rgn = hpb->rgn_tbl + rgn_i;
+
+ spin_lock(&hpb->rgn_state_lock);
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
+ srgn = rgn->srgn_tbl + srgn_i;
+ if (srgn->srgn_state == HPB_SRGN_VALID)
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ }
+ }
+ spin_unlock(&hpb->rgn_state_lock);
+	}
+
+out:
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
+ rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
+
+ if (ufshpb_get_state(hpb) == HPB_PRESENT)
+ queue_work(ufshpb_wq, &hpb->map_work);
+}
+
+static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
+{
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
+ set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+/*
+ * This function parses the recommended active subregion information in the
+ * sense data field of a response UPIU with SAM_STAT_GOOD status.
+ */
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
+ struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
+ int data_seg_len;
+
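+	/* The response may carry HPB info for a different LU; find its hpb */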
+ if (unlikely(lrbp->lun != rsp_field->lun)) {
+ struct scsi_device *sdev;
+ bool found = false;
+
+ __shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+
+ if (!hpb)
+ continue;
+
+ if (rsp_field->lun == hpb->lun) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+ }
+
+ if (!hpb)
+ return;
+
+ if (ufshpb_get_state(hpb) == HPB_INIT)
+ return;
+
+ if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+ (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT/SUSPEND\n",
+ __func__);
+ return;
+ }
+
+ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+ & MASK_RSP_UPIU_DATA_SEG_LEN;
+
+	/* To flush the remaining rsp_list, queue the map_work task */
+ if (!data_seg_len) {
+ if (!ufshpb_is_general_lun(hpb->lun))
+ return;
+
+ ufshpb_kick_map_work(hpb);
+ return;
+ }
+
+ BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
+
+ if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
+ return;
+
+ hpb->stats.rb_noti_cnt++;
+
+ switch (rsp_field->hpb_op) {
+ case HPB_RSP_REQ_REGION_UPDATE:
+ if (data_seg_len != DEV_DATA_SEG_LEN)
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: data seg length is not same.\n",
+ __func__);
+ ufshpb_rsp_req_region_update(hpb, rsp_field);
+ break;
+ case HPB_RSP_DEV_RESET:
+ dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+ "UFS device lost HPB information during PM.\n");
+
+ if (hpb->is_hcm) {
+ struct scsi_device *sdev;
+
+ __shost_for_each_device(sdev, hba->host) {
+ struct ufshpb_lu *h = sdev->hostdata;
+
+ if (h)
+ ufshpb_dev_reset_handler(h);
+ }
+ }
+
+ break;
+ default:
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "hpb_op is not available: %d\n",
+ rsp_field->hpb_op);
+ break;
+ }
+}
+
+static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct ufshpb_subregion *srgn)
+{
+ if (!list_empty(&rgn->list_inact_rgn))
+ return;
+
+ if (!list_empty(&srgn->list_act_srgn)) {
+ list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+ return;
+ }
+
+ list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+}
+
+static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn,
+ struct list_head *pending_list)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx;
+
+ if (!list_empty(&rgn->list_inact_rgn))
+ return;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (!list_empty(&srgn->list_act_srgn))
+ return;
+
+ list_add_tail(&rgn->list_inact_rgn, pending_list);
+}
+
+static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn;
+ struct ufshpb_subregion *srgn;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
+ struct ufshpb_subregion,
+ list_act_srgn))) {
+ if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+ break;
+
+ list_del_init(&srgn->list_act_srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ rgn = hpb->rgn_tbl + srgn->rgn_idx;
+ ret = ufshpb_add_region(hpb, rgn);
+ if (ret)
+ goto active_failed;
+
+ ret = ufshpb_issue_map_req(hpb, rgn, srgn);
+ if (ret) {
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "issue map_req failed. ret %d, region %d - %d\n",
+ ret, rgn->rgn_idx, srgn->srgn_idx);
+ goto active_failed;
+ }
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ return;
+
+active_failed:
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
+ rgn->rgn_idx, srgn->srgn_idx);
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_add_active_list(hpb, rgn, srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn;
+ unsigned long flags;
+ int ret;
+ LIST_HEAD(pending_list);
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
+ struct ufshpb_region,
+ list_inact_rgn))) {
+ if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+ break;
+
+ list_del_init(&rgn->list_inact_rgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+ ret = ufshpb_evict_region(hpb, rgn);
+ if (ret) {
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ }
+
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ }
+
+ list_splice(&pending_list, &hpb->lh_inact_rgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_normalization_work_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_normalization_work);
+ int rgn_idx;
+ u8 factor = hpb->params.normalization_factor;
+
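+	/*
+	 * Decay each subregion's read counter by the normalization factor
+	 * and recompute the region total from the decayed values.
+	 */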
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
+ int srgn_idx;
+
+ spin_lock(&rgn->rgn_lock);
+ rgn->reads = 0;
+ for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
+ struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
+
+ srgn->reads >>= factor;
+ rgn->reads += srgn->reads;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
+ if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
+ continue;
+
+ /* if region is active but has no reads - inactivate it */
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock(&hpb->rsp_list_lock);
+ }
+}
+
+static void ufshpb_map_work_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+ dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: ufshpb state is not PRESENT\n", __func__);
+ return;
+ }
+
+ ufshpb_run_inactive_region_list(hpb);
+ ufshpb_run_active_subregion_list(hpb);
+}
+
+/*
+ * This function does not need to hold any locks (rgn_state_lock,
+ * rsp_list_lock, etc.) because it is only called during initialization.
+ */
+static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
+ struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ struct ufshpb_subregion *srgn;
+ int srgn_idx, i;
+ int err = 0;
+
+ for_each_sub_region(rgn, srgn_idx, srgn) {
+ srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+ srgn->srgn_state = HPB_SRGN_INVALID;
+ if (!srgn->mctx) {
+ err = -ENOMEM;
+ dev_err(hba->dev,
+ "alloc mctx for pinned region failed\n");
+ goto release;
+ }
+
+ list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+ }
+
+ rgn->rgn_state = HPB_RGN_PINNED;
+ return 0;
+
+release:
+ for (i = 0; i < srgn_idx; i++) {
+ srgn = rgn->srgn_tbl + i;
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ }
+ return err;
+}
+
+static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn, bool last)
+{
+ int srgn_idx;
+ struct ufshpb_subregion *srgn;
+
+ for_each_sub_region(rgn, srgn_idx, srgn) {
+ INIT_LIST_HEAD(&srgn->list_act_srgn);
+
+ srgn->rgn_idx = rgn->rgn_idx;
+ srgn->srgn_idx = srgn_idx;
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ }
+
+ if (unlikely(last && hpb->last_srgn_entries))
+ srgn->is_last = true;
+}
+
+static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn, int srgn_cnt)
+{
+ rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
+ GFP_KERNEL);
+ if (!rgn->srgn_tbl)
+ return -ENOMEM;
+
+ rgn->srgn_cnt = srgn_cnt;
+ return 0;
+}
+
+static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
+ struct ufshpb_lu *hpb,
+ struct ufshpb_dev_info *hpb_dev_info,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ u32 entries_per_rgn;
+ u64 rgn_mem_size, tmp;
+
+ /* for pre_req */
+ hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
+
+ if (ufshpb_is_legacy(hba))
+ hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
+ else
+ hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+
+ hpb->cur_read_id = 0;
+
+ hpb->lu_pinned_start = hpb_lu_info->pinned_start;
+ hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
+ (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
+ : PINNED_NOT_SET;
+ hpb->lru_info.max_lru_active_cnt =
+ hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
+
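+	/*
+	 * Map-table sizing: each HPB entry is HPB_ENTRY_SIZE (8) bytes and
+	 * covers one HPB_ENTRY_BLOCK_SIZE (4KB) logical block; region and
+	 * subregion sizes are powers of two in HPB_RGN_SIZE_UNIT (512B) units.
+	 */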
+ rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
+ * HPB_ENTRY_SIZE;
+ do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
+ hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
+ * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
+
+ tmp = rgn_mem_size;
+ do_div(tmp, HPB_ENTRY_SIZE);
+ entries_per_rgn = (u32)tmp;
+ hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
+ hpb->entries_per_rgn_mask = entries_per_rgn - 1;
+
+ hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
+ hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
+ hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
+
+ tmp = rgn_mem_size;
+ do_div(tmp, hpb->srgn_mem_size);
+ hpb->srgns_per_rgn = (int)tmp;
+
+ hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+ entries_per_rgn);
+ hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+ (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
+ hpb->last_srgn_entries = hpb_lu_info->num_blocks
+ % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
+
+ hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
+
+ if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
+ hpb->is_hcm = true;
+}
+
+static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn_table, *rgn;
+ int rgn_idx, i;
+ int ret = 0;
+
+ rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
+ GFP_KERNEL);
+ if (!rgn_table)
+ return -ENOMEM;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ int srgn_cnt = hpb->srgns_per_rgn;
+ bool last_srgn = false;
+
+ rgn = rgn_table + rgn_idx;
+ rgn->rgn_idx = rgn_idx;
+
+ spin_lock_init(&rgn->rgn_lock);
+
+ INIT_LIST_HEAD(&rgn->list_inact_rgn);
+ INIT_LIST_HEAD(&rgn->list_lru_rgn);
+ INIT_LIST_HEAD(&rgn->list_expired_rgn);
+
+ if (rgn_idx == hpb->rgns_per_lu - 1) {
+ srgn_cnt = ((hpb->srgns_per_lu - 1) %
+ hpb->srgns_per_rgn) + 1;
+ last_srgn = true;
+ }
+
+ ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
+ if (ret)
+ goto release_srgn_table;
+ ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
+
+ if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
+ ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
+ if (ret)
+ goto release_srgn_table;
+ } else {
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+ }
+
+ rgn->rgn_flags = 0;
+ rgn->hpb = hpb;
+ }
+
+ hpb->rgn_tbl = rgn_table;
+
+ return 0;
+
+release_srgn_table:
+ for (i = 0; i <= rgn_idx; i++)
+ kvfree(rgn_table[i].srgn_tbl);
+
+ kvfree(rgn_table);
+ return ret;
+}
+
+static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
+ struct ufshpb_region *rgn)
+{
+ int srgn_idx;
+ struct ufshpb_subregion *srgn;
+
+ for_each_sub_region(rgn, srgn_idx, srgn)
+ if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+ srgn->srgn_state = HPB_SRGN_UNUSED;
+ ufshpb_put_map_ctx(hpb, srgn->mctx);
+ }
+}
+
+static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
+{
+ int rgn_idx;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ struct ufshpb_region *rgn;
+
+ rgn = hpb->rgn_tbl + rgn_idx;
+ if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+ rgn->rgn_state = HPB_RGN_INACTIVE;
+
+ ufshpb_destroy_subregion_tbl(hpb, rgn);
+ }
+
+ kvfree(rgn->srgn_tbl);
+ }
+
+ kvfree(hpb->rgn_tbl);
+}
+
+/* SYSFS functions: HPB statistics */
+#define ufshpb_sysfs_attr_show_func(__name) \
+static ssize_t __name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
+ \
+ if (!hpb) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
+} \
+\
+static DEVICE_ATTR_RO(__name)
+
+ufshpb_sysfs_attr_show_func(hit_cnt);
+ufshpb_sysfs_attr_show_func(miss_cnt);
+ufshpb_sysfs_attr_show_func(rb_noti_cnt);
+ufshpb_sysfs_attr_show_func(rb_active_cnt);
+ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
+ufshpb_sysfs_attr_show_func(map_req_cnt);
+ufshpb_sysfs_attr_show_func(umap_req_cnt);
+
+static struct attribute *hpb_dev_stat_attrs[] = {
+ &dev_attr_hit_cnt.attr,
+ &dev_attr_miss_cnt.attr,
+ &dev_attr_rb_noti_cnt.attr,
+ &dev_attr_rb_active_cnt.attr,
+ &dev_attr_rb_inactive_cnt.attr,
+ &dev_attr_map_req_cnt.attr,
+ &dev_attr_umap_req_cnt.attr,
+ NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_stat_group = {
+ .name = "hpb_stats",
+ .attrs = hpb_dev_stat_attrs,
+};
+
+/* SYSFS functions: HPB parameters */
+#define ufshpb_sysfs_param_show_func(__name) \
+static ssize_t __name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
+ \
+ if (!hpb) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%d\n", hpb->params.__name); \
+}
+
+ufshpb_sysfs_param_show_func(requeue_timeout_ms);
+static ssize_t
+requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0)
+ return -EINVAL;
+
+ hpb->params.requeue_timeout_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(requeue_timeout_ms);
+
+ufshpb_sysfs_param_show_func(activation_thld);
+static ssize_t
+activation_thld_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ hpb->params.activation_thld = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(activation_thld);
+
+ufshpb_sysfs_param_show_func(normalization_factor);
+static ssize_t
+normalization_factor_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
+ return -EINVAL;
+
+ hpb->params.normalization_factor = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(normalization_factor);
+
+ufshpb_sysfs_param_show_func(eviction_thld_enter);
+static ssize_t
+eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= hpb->params.eviction_thld_exit)
+ return -EINVAL;
+
+ hpb->params.eviction_thld_enter = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_enter);
+
+ufshpb_sysfs_param_show_func(eviction_thld_exit);
+static ssize_t
+eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= hpb->params.activation_thld)
+ return -EINVAL;
+
+ hpb->params.eviction_thld_exit = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_exit);
+
+ufshpb_sysfs_param_show_func(read_timeout_ms);
+static ssize_t
+read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ /* read_timeout >> timeout_polling_interval */
+ if (val < hpb->params.timeout_polling_interval_ms * 2)
+ return -EINVAL;
+
+ hpb->params.read_timeout_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(read_timeout_ms);
+
+ufshpb_sysfs_param_show_func(read_timeout_expiries);
+static ssize_t
+read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ hpb->params.read_timeout_expiries = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(read_timeout_expiries);
+
+ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
+static ssize_t
+timeout_polling_interval_ms_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ /* timeout_polling_interval << read_timeout */
+ if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
+ return -EINVAL;
+
+ hpb->params.timeout_polling_interval_ms = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_polling_interval_ms);
+
+ufshpb_sysfs_param_show_func(inflight_map_req);
+static ssize_t inflight_map_req_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+ int val;
+
+ if (!hpb)
+ return -ENODEV;
+
+ if (!hpb->is_hcm)
+ return -EOPNOTSUPP;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
+ return -EINVAL;
+
+ hpb->params.inflight_map_req = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inflight_map_req);
+
+static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
+{
+ hpb->params.activation_thld = ACTIVATION_THRESHOLD;
+ hpb->params.normalization_factor = 1;
+ hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
+ hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
+ hpb->params.read_timeout_ms = READ_TO_MS;
+ hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
+ hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
+ hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
+}
+
+static struct attribute *hpb_dev_param_attrs[] = {
+ &dev_attr_requeue_timeout_ms.attr,
+ &dev_attr_activation_thld.attr,
+ &dev_attr_normalization_factor.attr,
+ &dev_attr_eviction_thld_enter.attr,
+ &dev_attr_eviction_thld_exit.attr,
+ &dev_attr_read_timeout_ms.attr,
+ &dev_attr_read_timeout_expiries.attr,
+ &dev_attr_timeout_polling_interval_ms.attr,
+ &dev_attr_inflight_map_req.attr,
+ NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_param_group = {
+ .name = "hpb_params",
+ .attrs = hpb_dev_param_attrs,
+};
+
+static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req = NULL, *t;
+ int qd = hpb->sdev_ufs_lu->queue_depth / 2;
+ int i;
+
+ INIT_LIST_HEAD(&hpb->lh_pre_req_free);
+
+ hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
+ hpb->throttle_pre_req = qd;
+ hpb->num_inflight_pre_req = 0;
+
+ if (!hpb->pre_req)
+ goto release_mem;
+
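+	/*
+	 * Pre-allocate one pre-req (a bio plus one zeroed page) per slot;
+	 * the pool is sized to half the LU queue depth.
+	 */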
+ for (i = 0; i < qd; i++) {
+ pre_req = hpb->pre_req + i;
+ INIT_LIST_HEAD(&pre_req->list_req);
+ pre_req->req = NULL;
+
+ pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+ if (!pre_req->bio)
+ goto release_mem;
+
+ pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pre_req->wb.m_page) {
+ bio_put(pre_req->bio);
+ goto release_mem;
+ }
+
+ list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+ }
+
+ return 0;
+release_mem:
+ list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
+ list_del_init(&pre_req->list_req);
+ bio_put(pre_req->bio);
+ __free_page(pre_req->wb.m_page);
+ }
+
+ kfree(hpb->pre_req);
+ return -ENOMEM;
+}
+
+static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_req *pre_req = NULL;
+ int i;
+
+ for (i = 0; i < hpb->throttle_pre_req; i++) {
+ pre_req = hpb->pre_req + i;
+ bio_put(hpb->pre_req[i].bio);
+		if (pre_req->wb.m_page)
+ __free_page(hpb->pre_req[i].wb.m_page);
+ list_del_init(&pre_req->list_req);
+ }
+
+ kfree(hpb->pre_req);
+}
+
+static void ufshpb_stat_init(struct ufshpb_lu *hpb)
+{
+ hpb->stats.hit_cnt = 0;
+ hpb->stats.miss_cnt = 0;
+ hpb->stats.rb_noti_cnt = 0;
+ hpb->stats.rb_active_cnt = 0;
+ hpb->stats.rb_inactive_cnt = 0;
+ hpb->stats.map_req_cnt = 0;
+ hpb->stats.umap_req_cnt = 0;
+}
+
+static void ufshpb_param_init(struct ufshpb_lu *hpb)
+{
+ hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
+ if (hpb->is_hcm)
+ ufshpb_hcm_param_init(hpb);
+}
+
+static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+ int ret;
+
+ spin_lock_init(&hpb->rgn_state_lock);
+ spin_lock_init(&hpb->rsp_list_lock);
+ spin_lock_init(&hpb->param_lock);
+
+ INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
+ INIT_LIST_HEAD(&hpb->lh_act_srgn);
+ INIT_LIST_HEAD(&hpb->lh_inact_rgn);
+ INIT_LIST_HEAD(&hpb->list_hpb_lu);
+
+ INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+ if (hpb->is_hcm) {
+ INIT_WORK(&hpb->ufshpb_normalization_work,
+ ufshpb_normalization_work_handler);
+ INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+ ufshpb_read_to_handler);
+ }
+
+ hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
+ sizeof(struct ufshpb_req), 0, 0, NULL);
+ if (!hpb->map_req_cache) {
+ dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
+ hpb->lun);
+ return -ENOMEM;
+ }
+
+ hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
+ sizeof(struct page *) * hpb->pages_per_srgn,
+ 0, 0, NULL);
+ if (!hpb->m_page_cache) {
+ dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
+ hpb->lun);
+ ret = -ENOMEM;
+ goto release_req_cache;
+ }
+
+ ret = ufshpb_pre_req_mempool_init(hpb);
+ if (ret) {
+ dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
+ hpb->lun);
+ goto release_m_page_cache;
+ }
+
+ ret = ufshpb_alloc_region_tbl(hba, hpb);
+ if (ret)
+ goto release_pre_req_mempool;
+
+ ufshpb_stat_init(hpb);
+ ufshpb_param_init(hpb);
+
+ if (hpb->is_hcm) {
+ unsigned int poll;
+
+ poll = hpb->params.timeout_polling_interval_ms;
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+ }
+
+ return 0;
+
+release_pre_req_mempool:
+ ufshpb_pre_req_mempool_destroy(hpb);
+release_m_page_cache:
+ kmem_cache_destroy(hpb->m_page_cache);
+release_req_cache:
+ kmem_cache_destroy(hpb->map_req_cache);
+ return ret;
+}
+
+static struct ufshpb_lu *
+ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
+ struct ufshpb_dev_info *hpb_dev_info,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ struct ufshpb_lu *hpb;
+ int ret;
+
+ hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
+ if (!hpb)
+ return NULL;
+
+ hpb->lun = sdev->lun;
+ hpb->sdev_ufs_lu = sdev;
+
+ ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
+
+ ret = ufshpb_lu_hpb_init(hba, hpb);
+ if (ret) {
+ dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
+ goto release_hpb;
+ }
+
+ sdev->hostdata = hpb;
+ return hpb;
+
+release_hpb:
+ kfree(hpb);
+ return NULL;
+}
+
+static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
+{
+ struct ufshpb_region *rgn, *next_rgn;
+ struct ufshpb_subregion *srgn, *next_srgn;
+ unsigned long flags;
+
+	/*
+	 * If a device reset occurred, the remaining HPB region information
+	 * may be stale. Discard the HPB response lists left over after the
+	 * reset to prevent unnecessary work.
+	 */
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
+ list_inact_rgn)
+ list_del_init(&rgn->list_inact_rgn);
+
+ list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
+ list_act_srgn)
+ list_del_init(&srgn->list_act_srgn);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
+{
+ if (hpb->is_hcm) {
+ cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
+ cancel_work_sync(&hpb->ufshpb_normalization_work);
+ }
+ cancel_work_sync(&hpb->map_work);
+}
+
+static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
+{
+ int err = 0;
+ bool flag_res = true;
+ int try;
+
+ /* wait for the device to complete HPB reset query */
+ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+ dev_dbg(hba->dev,
+ "%s start flag reset polling %d times\n",
+ __func__, try);
+
+ /* Poll fHpbReset flag to be cleared */
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s reading fHpbReset flag failed with error %d\n",
+ __func__, err);
+ return flag_res;
+ }
+
+ if (!flag_res)
+ goto out;
+
+ usleep_range(1000, 1100);
+ }
+ if (flag_res) {
+ dev_err(hba->dev,
+ "%s fHpbReset was not cleared by the device\n",
+ __func__);
+ }
+out:
+ return flag_res;
+}
+
+void ufshpb_reset(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_RESET)
+ continue;
+
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ }
+}
+
+void ufshpb_reset_host(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ continue;
+ ufshpb_set_state(hpb, HPB_RESET);
+ ufshpb_cancel_jobs(hpb);
+ ufshpb_discard_rsp_lists(hpb);
+ }
+}
+
+void ufshpb_suspend(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (ufshpb_get_state(hpb) != HPB_PRESENT)
+ continue;
+ ufshpb_set_state(hpb, HPB_SUSPEND);
+ ufshpb_cancel_jobs(hpb);
+ }
+}
+
+void ufshpb_resume(struct ufs_hba *hba)
+{
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+ (ufshpb_get_state(hpb) != HPB_SUSPEND))
+ continue;
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ ufshpb_kick_map_work(hpb);
+ if (hpb->is_hcm) {
+ unsigned int poll =
+ hpb->params.timeout_polling_interval_ms;
+
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(poll));
+ }
+ }
+}
+
+static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
+ struct ufshpb_lu_info *hpb_lu_info)
+{
+ u16 max_active_rgns;
+ u8 lu_enable;
+ int size;
+ int ret;
+ char desc_buf[QUERY_DESC_MAX_SIZE];
+
+ ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
+
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ QUERY_DESC_IDN_UNIT, lun, 0,
+ desc_buf, &size);
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: idn: %d lun: %d query request failed",
+ __func__, QUERY_DESC_IDN_UNIT, lun);
+ return ret;
+ }
+
+ lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
+ if (lu_enable != LU_ENABLED_HPB_FUNC)
+ return -ENODEV;
+
+ max_active_rgns = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
+ if (!max_active_rgns) {
+ dev_err(hba->dev,
+ "lun %d wrong number of max active regions\n", lun);
+ return -ENODEV;
+ }
+
+ hpb_lu_info->num_blocks = get_unaligned_be64(
+ desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
+ hpb_lu_info->pinned_start = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
+ hpb_lu_info->num_pinned = get_unaligned_be16(
+ desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
+ hpb_lu_info->max_active_rgns = max_active_rgns;
+
+ return 0;
+}
+
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+
+ if (!hpb)
+ return;
+
+ ufshpb_set_state(hpb, HPB_FAILED);
+
+ sdev = hpb->sdev_ufs_lu;
+ sdev->hostdata = NULL;
+
+ ufshpb_cancel_jobs(hpb);
+
+ ufshpb_pre_req_mempool_destroy(hpb);
+ ufshpb_destroy_region_tbl(hpb);
+
+ kmem_cache_destroy(hpb->map_req_cache);
+ kmem_cache_destroy(hpb->m_page_cache);
+
+ list_del_init(&hpb->list_hpb_lu);
+
+ kfree(hpb);
+}
+
+static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
+{
+ int pool_size;
+ struct ufshpb_lu *hpb;
+ struct scsi_device *sdev;
+ bool init_success;
+
+ if (tot_active_srgn_pages == 0) {
+ ufshpb_remove(hba);
+ return;
+ }
+
+ init_success = !ufshpb_check_hpb_reset_query(hba);
+
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ if (pool_size > tot_active_srgn_pages) {
+ mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
+ mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
+ }
+
+ shost_for_each_device(sdev, hba->host) {
+ hpb = ufshpb_get_hpb_data(sdev);
+ if (!hpb)
+ continue;
+
+ if (init_success) {
+ ufshpb_set_state(hpb, HPB_PRESENT);
+ if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
+ queue_work(ufshpb_wq, &hpb->map_work);
+ if (!hpb->is_hcm)
+ ufshpb_issue_umap_all_req(hpb);
+ } else {
+ dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
+ ufshpb_destroy_lu(hba, sdev);
+ }
+ }
+
+ if (!init_success)
+ ufshpb_remove(hba);
+}
+
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct ufshpb_lu *hpb;
+ int ret;
+ struct ufshpb_lu_info hpb_lu_info = { 0 };
+ int lun = sdev->lun;
+
+ if (lun >= hba->dev_info.max_lu_supported)
+ goto out;
+
+ ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
+ if (ret)
+ goto out;
+
+ hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
+ &hpb_lu_info);
+ if (!hpb)
+ goto out;
+
+ tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
+ hpb->srgns_per_rgn * hpb->pages_per_srgn;
+
+out:
+ /* All LUs are initialized */
+ if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
+ ufshpb_hpb_lu_prepared(hba);
+}
+
+static int ufshpb_init_mem_wq(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned int pool_size;
+
+ ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
+ sizeof(struct ufshpb_map_ctx),
+ 0, 0, NULL);
+ if (!ufshpb_mctx_cache) {
+ dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
+ return -ENOMEM;
+ }
+
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
+ __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
+
+ ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
+ ufshpb_mctx_cache);
+ if (!ufshpb_mctx_pool) {
+ dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
+ ret = -ENOMEM;
+ goto release_mctx_cache;
+ }
+
+ ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
+ if (!ufshpb_page_pool) {
+ dev_err(hba->dev, "ufshpb: cannot init page pool\n");
+ ret = -ENOMEM;
+ goto release_mctx_pool;
+ }
+
+ ufshpb_wq = alloc_workqueue("ufshpb-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ if (!ufshpb_wq) {
+ dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
+ ret = -ENOMEM;
+ goto release_page_pool;
+ }
+
+ return 0;
+
+release_page_pool:
+ mempool_destroy(ufshpb_page_pool);
+release_mctx_pool:
+ mempool_destroy(ufshpb_mctx_pool);
+release_mctx_cache:
+ kmem_cache_destroy(ufshpb_mctx_cache);
+ return ret;
+}
+
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
+{
+ struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
+ int max_active_rgns = 0;
+ int hpb_num_lu;
+
+ hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
+ if (hpb_num_lu == 0) {
+ dev_err(hba->dev, "No HPB LU supported\n");
+ hpb_info->hpb_disabled = true;
+ return;
+ }
+
+ hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
+ hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
+ max_active_rgns = get_unaligned_be16(geo_buf +
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
+
+ if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
+ max_active_rgns == 0) {
+ dev_err(hba->dev, "No HPB supported device\n");
+ hpb_info->hpb_disabled = true;
+ return;
+ }
+}
+
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+ int version, ret;
+ u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+
+ hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
+
+ version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
+ if ((version != HPB_SUPPORT_VERSION) &&
+ (version != HPB_SUPPORT_LEGACY_VERSION)) {
+ dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
+ __func__, version);
+ hpb_dev_info->hpb_disabled = true;
+ return;
+ }
+
+ if (version == HPB_SUPPORT_LEGACY_VERSION)
+ hpb_dev_info->is_legacy = true;
+
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret)
+ dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
+ __func__);
+ hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
+
+ /*
+ * Get the number of user logical unit to check whether all
+ * scsi_device finish initialization
+ */
+ hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+}
+
+void ufshpb_init(struct ufs_hba *hba)
+{
+ struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+ int try;
+ int ret;
+
+ if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
+ return;
+
+ if (ufshpb_init_mem_wq(hba)) {
+ hpb_dev_info->hpb_disabled = true;
+ return;
+ }
+
+ atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
+ tot_active_srgn_pages = 0;
+ /* issue HPB reset query */
+ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+ ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
+ if (!ret)
+ break;
+ }
+}
+
+void ufshpb_remove(struct ufs_hba *hba)
+{
+ mempool_destroy(ufshpb_page_pool);
+ mempool_destroy(ufshpb_mctx_pool);
+ kmem_cache_destroy(ufshpb_mctx_cache);
+
+ destroy_workqueue(ufshpb_wq);
+}
+
+module_param(ufshpb_host_map_kbytes, uint, 0644);
+MODULE_PARM_DESC(ufshpb_host_map_kbytes,
+	"ufshpb host mapping memory in kilobytes for the ufshpb memory pool");
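For reference, the pool sizing in ufshpb_init_mem_wq() above reduces to a few
lines of arithmetic. A standalone sketch, assuming the default of 1024 kB for
the module parameter and a 4 KiB page size:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ufshpb_host_map_kbytes = 1024;	/* assumed default */
	/* Same math as ufshpb_init_mem_wq(): bytes, page-aligned, in pages */
	unsigned long pool_size =
		PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;

	/* 1024 kB -> 256 pages backing both the mctx and page mempools */
	printf("pool_size = %lu pages\n", pool_size);
	return 0;
}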
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
new file mode 100644
index 000000000000..a79e07398970
--- /dev/null
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+/* hpb response UPIU macro */
+#define HPB_RSP_NONE 0x0
+#define HPB_RSP_REQ_REGION_UPDATE 0x1
+#define HPB_RSP_DEV_RESET 0x2
+#define MAX_ACTIVE_NUM 2
+#define MAX_INACTIVE_NUM 2
+#define DEV_DATA_SEG_LEN 0x14
+#define DEV_SENSE_SEG_LEN 0x12
+#define DEV_DES_TYPE 0x80
+#define DEV_ADDITIONAL_LEN 0x10
+
+/* hpb map & entries macro */
+#define HPB_RGN_SIZE_UNIT 512
+#define HPB_ENTRY_BLOCK_SIZE 4096
+#define HPB_ENTRY_SIZE 0x8
+#define PINNED_NOT_SET U32_MAX
+
+/* hpb support chunk size */
+#define HPB_LEGACY_CHUNK_HIGH 1
+#define HPB_MULTI_CHUNK_LOW 7
+#define HPB_MULTI_CHUNK_HIGH 255
+
+/* hpb vendor defined opcode */
+#define UFSHPB_READ 0xF8
+#define UFSHPB_READ_BUFFER 0xF9
+#define UFSHPB_READ_BUFFER_ID 0x01
+#define UFSHPB_WRITE_BUFFER 0xFA
+#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01
+#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
+#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03
+#define HPB_WRITE_BUFFER_CMD_LENGTH 10
+#define MAX_HPB_READ_ID 0x7F
+#define HPB_READ_BUFFER_CMD_LENGTH 10
+#define LU_ENABLED_HPB_FUNC 0x02
+
+#define HPB_RESET_REQ_RETRIES 10
+#define HPB_MAP_REQ_RETRIES 5
+#define HPB_REQUEUE_TIME_MS 0
+
+#define HPB_SUPPORT_VERSION 0x200
+#define HPB_SUPPORT_LEGACY_VERSION 0x100
+
+enum UFSHPB_MODE {
+ HPB_HOST_CONTROL,
+ HPB_DEVICE_CONTROL,
+};
+
+enum UFSHPB_STATE {
+ HPB_INIT = 0,
+ HPB_PRESENT = 1,
+ HPB_SUSPEND,
+ HPB_FAILED,
+ HPB_RESET,
+};
+
+enum HPB_RGN_STATE {
+ HPB_RGN_INACTIVE,
+ HPB_RGN_ACTIVE,
+ /* pinned regions are always active */
+ HPB_RGN_PINNED,
+};
+
+enum HPB_SRGN_STATE {
+ HPB_SRGN_UNUSED,
+ HPB_SRGN_INVALID,
+ HPB_SRGN_VALID,
+ HPB_SRGN_ISSUED,
+};
+
+/**
+ * struct ufshpb_lu_info - UFSHPB logical unit related info
+ * @num_blocks: the number of logical blocks
+ * @pinned_start: the start region number of the pinned region
+ * @num_pinned: the number of pinned regions
+ * @max_active_rgns: maximum number of active regions
+ */
+struct ufshpb_lu_info {
+ int num_blocks;
+ int pinned_start;
+ int num_pinned;
+ int max_active_rgns;
+};
+
+struct ufshpb_map_ctx {
+ struct page **m_page;
+ unsigned long *ppn_dirty;
+};
+
+struct ufshpb_subregion {
+ struct ufshpb_map_ctx *mctx;
+ enum HPB_SRGN_STATE srgn_state;
+ int rgn_idx;
+ int srgn_idx;
+ bool is_last;
+
+ /* subregion reads - for host mode */
+ unsigned int reads;
+
+ /* below information is used by rsp_list */
+ struct list_head list_act_srgn;
+};
+
+struct ufshpb_region {
+ struct ufshpb_lu *hpb;
+ struct ufshpb_subregion *srgn_tbl;
+ enum HPB_RGN_STATE rgn_state;
+ int rgn_idx;
+ int srgn_cnt;
+
+ /* below information is used by rsp_list */
+ struct list_head list_inact_rgn;
+
+ /* below information is used by lru */
+ struct list_head list_lru_rgn;
+ unsigned long rgn_flags;
+#define RGN_FLAG_DIRTY 0
+#define RGN_FLAG_UPDATE 1
+
+ /* region reads - for host mode */
+ spinlock_t rgn_lock;
+ unsigned int reads;
+ /* region "cold" timer - for host mode */
+ ktime_t read_timeout;
+ unsigned int read_timeout_expiries;
+ struct list_head list_expired_rgn;
+};
+
+#define for_each_sub_region(rgn, i, srgn) \
+ for ((i) = 0; \
+ ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
+ (i)++)
+
+/**
+ * struct ufshpb_req - HPB related request structure (write/read buffer)
+ * @req: block layer request structure
+ * @bio: bio for this request
+ * @hpb: ufshpb_lu structure this request belongs to
+ * @list_req: ufshpb_req mempool list
+ * @mctx: L2P map information
+ * @rgn_idx: target region index
+ * @srgn_idx: target sub-region index
+ * @lun: target logical unit number
+ * @m_page: L2P map information data for pre-request
+ * @len: length of host-side cached L2P map in m_page
+ * @lpn: start LPN of L2P map in m_page
+ */
+struct ufshpb_req {
+ struct request *req;
+ struct bio *bio;
+ struct ufshpb_lu *hpb;
+ struct list_head list_req;
+ union {
+ struct {
+ struct ufshpb_map_ctx *mctx;
+ unsigned int rgn_idx;
+ unsigned int srgn_idx;
+ unsigned int lun;
+ } rb;
+ struct {
+ struct page *m_page;
+ unsigned int len;
+ unsigned long lpn;
+ } wb;
+ };
+};
+
+struct victim_select_info {
+ struct list_head lh_lru_rgn; /* LRU list of regions */
+ int max_lru_active_cnt; /* supported hpb #region - pinned #region */
+ atomic_t active_cnt;
+};
+
+/**
+ * ufshpb_params - ufs hpb parameters
+ * @requeue_timeout_ms - requeue threshold of wb command (0x2)
+ * @activation_thld - min reads [IOs] to activate/update a region
+ * @normalization_factor - shift right the region's reads
+ * @eviction_thld_enter - min reads [IOs] for the entering region in eviction
+ * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction
+ * @read_timeout_ms - timeout [ms] from the last read IO to the region
+ * @read_timeout_expiries - number of allowed timeout expiries
+ * @timeout_polling_interval_ms - frequency in which timeouts are checked
+ * @inflight_map_req - number of inflight map requests
+ */
+struct ufshpb_params {
+ unsigned int requeue_timeout_ms;
+ unsigned int activation_thld;
+ unsigned int normalization_factor;
+ unsigned int eviction_thld_enter;
+ unsigned int eviction_thld_exit;
+ unsigned int read_timeout_ms;
+ unsigned int read_timeout_expiries;
+ unsigned int timeout_polling_interval_ms;
+ unsigned int inflight_map_req;
+};
+
+struct ufshpb_stats {
+ u64 hit_cnt;
+ u64 miss_cnt;
+ u64 rb_noti_cnt;
+ u64 rb_active_cnt;
+ u64 rb_inactive_cnt;
+ u64 map_req_cnt;
+ u64 pre_req_cnt;
+ u64 umap_req_cnt;
+};
+
+struct ufshpb_lu {
+ int lun;
+ struct scsi_device *sdev_ufs_lu;
+
+	spinlock_t rgn_state_lock; /* protects rgn/srgn state */
+ struct ufshpb_region *rgn_tbl;
+
+ atomic_t hpb_state;
+
+ spinlock_t rsp_list_lock;
+ struct list_head lh_act_srgn; /* hold rsp_list_lock */
+ struct list_head lh_inact_rgn; /* hold rsp_list_lock */
+
+ /* pre request information */
+ struct ufshpb_req *pre_req;
+ int num_inflight_pre_req;
+ int throttle_pre_req;
+ int num_inflight_map_req; /* hold param_lock */
+ spinlock_t param_lock;
+
+ struct list_head lh_pre_req_free;
+ int cur_read_id;
+ int pre_req_min_tr_len;
+ int pre_req_max_tr_len;
+
+ /* cached L2P map management worker */
+ struct work_struct map_work;
+
+ /* for selecting victim */
+ struct victim_select_info lru_info;
+ struct work_struct ufshpb_normalization_work;
+ struct delayed_work ufshpb_read_to_work;
+ unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0
+
+ /* pinned region information */
+ u32 lu_pinned_start;
+ u32 lu_pinned_end;
+
+ /* HPB related configuration */
+ u32 rgns_per_lu;
+ u32 srgns_per_lu;
+ u32 last_srgn_entries;
+ int srgns_per_rgn;
+ u32 srgn_mem_size;
+ u32 entries_per_rgn_mask;
+ u32 entries_per_rgn_shift;
+ u32 entries_per_srgn;
+ u32 entries_per_srgn_mask;
+ u32 entries_per_srgn_shift;
+ u32 pages_per_srgn;
+
+ bool is_hcm;
+
+ struct ufshpb_stats stats;
+ struct ufshpb_params params;
+
+ struct kmem_cache *map_req_cache;
+ struct kmem_cache *m_page_cache;
+
+ struct list_head list_hpb_lu;
+};
+
+struct ufs_hba;
+struct ufshcd_lrb;
+
+#ifndef CONFIG_SCSI_UFS_HPB
+static inline int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
+static inline void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static inline void ufshpb_resume(struct ufs_hba *hba) {}
+static inline void ufshpb_suspend(struct ufs_hba *hba) {}
+static inline void ufshpb_reset(struct ufs_hba *hba) {}
+static inline void ufshpb_reset_host(struct ufs_hba *hba) {}
+static inline void ufshpb_init(struct ufs_hba *hba) {}
+static inline void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_remove(struct ufs_hba *hba) {}
+static inline bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
+static inline void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
+static inline void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
+static inline bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
+#else
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_resume(struct ufs_hba *hba);
+void ufshpb_suspend(struct ufs_hba *hba);
+void ufshpb_reset(struct ufs_hba *hba);
+void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_remove(struct ufs_hba *hba);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+bool ufshpb_is_legacy(struct ufs_hba *hba);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+extern struct attribute_group ufs_sysfs_hpb_param_group;
+#endif
+
+#endif /* End of Header */
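To make the geometry macros above concrete: a region spans 512 B * 2^exp of
LBA space, and each 4 KiB block in that span costs one 8-byte L2P entry. A
standalone sketch, with 0x10 as an assumed example exponent (real values come
from the geometry descriptor):

#include <stdio.h>

#define HPB_RGN_SIZE_UNIT	512
#define HPB_ENTRY_BLOCK_SIZE	4096
#define HPB_ENTRY_SIZE		0x8

int main(void)
{
	unsigned int rgn_size_exp = 0x10;	/* assumed example exponent */
	/* LBA span covered by one region: 512 B * 2^exp */
	unsigned long long rgn_bytes =
		(1ULL << rgn_size_exp) * HPB_RGN_SIZE_UNIT;
	/* One 8-byte L2P entry per 4 KiB block in that span */
	unsigned long long map_bytes =
		rgn_bytes / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;

	/* 0x10 -> a 32 MiB region needing 64 KiB of cached map */
	printf("region spans %llu MiB, map is %llu KiB\n",
	       rgn_bytes >> 20, map_bytes >> 10);
	return 0;
}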
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b0deaf4af5a3..c25ce8f0e0af 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -519,7 +519,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
- struct request *rq = sc->request;
+ struct request *rq = scsi_cmd_to_rq(sc);
struct blk_integrity *bi;
virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
@@ -543,7 +543,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
struct scsi_cmnd *sc)
{
- u32 tag = blk_mq_unique_tag(sc->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
u16 hwq = blk_mq_unique_tag_to_hwq(tag);
return &vscsi->req_vqs[hwq];
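The sc->request to scsi_cmd_to_rq() conversions in this and the following
hunks rely on the accessor added earlier in this series; it is essentially a
one-line wrapper (sketch of the helper in include/scsi/scsi_cmnd.h):

/* A scsi_cmnd is the driver-private payload of a blk-mq request, so the
 * request can be recovered from the command without a back-pointer.
 */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}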
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index edc8a139a60d..6f10a43510fb 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -466,14 +466,16 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
unsigned long flags;
struct wd719x_scb *scb = scsi_cmd_priv(cmd);
struct wd719x *wd = shost_priv(cmd->device->host);
+ struct device *dev = &wd->pdev->dev;
- dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
+ dev_info(dev, "abort command, tag: %x\n", scsi_cmd_to_rq(cmd)->tag);
- action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
+ action = WD719X_CMD_ABORT;
spin_lock_irqsave(wd->sh->host_lock, flags);
result = wd719x_direct_cmd(wd, action, cmd->device->id,
- cmd->device->lun, cmd->tag, scb->phys, 0);
+ cmd->device->lun, scsi_cmd_to_rq(cmd)->tag,
+ scb->phys, 0);
wd719x_finish_cmd(scb, DID_ABORT);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
if (result)
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index ec9d399fbbd8..0204e314b482 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -212,7 +212,7 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
- ring_req->timeout_per_command = sc->request->timeout / HZ;
+ ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
ring_req->seg[i] = shadow->seg[i];
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 7eac76cccc4c..f1e5cf8a17d9 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -168,15 +168,13 @@ static const struct parisc_device_id zalon_tbl[] __initconst = {
MODULE_DEVICE_TABLE(parisc, zalon_tbl);
-static int __exit zalon_remove(struct parisc_device *dev)
+static void __exit zalon_remove(struct parisc_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
scsi_remove_host(host);
ncr53c8xx_release(host);
free_irq(dev->irq, host);
-
- return 0;
}
static struct parisc_driver zalon_driver __refdata = {
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
index eceeb5d160ad..4dbec4063b3d 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
@@ -16,8 +16,6 @@
#ifndef __ISP_LOCAL_H_INCLUDED__
#define __ISP_LOCAL_H_INCLUDED__
-#include <stdbool.h>
-
#include "isp_global.h"
#include <isp2400_support.h>
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
index 540b405cc0f7..a3c7f3de6d17 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
@@ -16,7 +16,7 @@
#ifndef __PRINT_SUPPORT_H_INCLUDED__
#define __PRINT_SUPPORT_H_INCLUDED__
-#include <stdarg.h>
+#include <linux/stdarg.h>
extern int (*sh_css_printf)(const char *fmt, va_list args);
/* depends on host supplied print function in ia_css_init() */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h
index 6b38723b27cd..3b89bbd837a0 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_env.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_env.h
@@ -17,7 +17,7 @@
#define __IA_CSS_ENV_H
#include <type_support.h>
-#include <stdarg.h> /* va_list */
+#include <linux/stdarg.h> /* va_list */
#include "ia_css_types.h"
#include "ia_css_acc_types.h"
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
index 5e6e7447ae00..e37ef4232c55 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -19,7 +19,7 @@
/*! \file */
#include <type_support.h>
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include "ia_css_types.h"
#include "ia_css_binary.h"
#include "ia_css_frame_public.h"
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
index 3c669ec79b68..496faa7297a5 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -20,7 +20,7 @@
#include <math_support.h>
#include <type_support.h>
#include <platform_support.h>
-#include <stdarg.h>
+#include <linux/stdarg.h>
#if !defined(ISP2401)
#include "input_formatter.h"
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index c163b14774d7..72171ea3dd53 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,7 +5,7 @@ menuconfig TARGET_CORE
depends on BLOCK
select CONFIGFS_FS
select CRC_T10DIF
- select BLK_SCSI_REQUEST
+ select SCSI_COMMON
select SGL_ALLOC
default n
help
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index b044999ad002..072afd070f3e 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -234,7 +234,7 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
- int ret = -EINVAL;
+ int ret;
if ((!ccmd->setup_ddp) ||
(!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index cbb2118fb35e..52db28d868d5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -183,7 +183,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
- tl_cmd->sc_cmd_tag = sc->request->tag;
+ tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
tcm_loop_target_queue_cmd(tl_cmd);
return 0;
@@ -241,7 +241,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED;
+ int ret;
/*
* Locate the tcm_loop_hba_t pointer
@@ -249,7 +249,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
- sc->request->tag, TMR_ABORT_TASK);
+ scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -261,7 +261,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED;
+ int ret;
/*
* Locate the tcm_loop_hba_t pointer
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 4d3ceee23622..b9f9fb5d7e63 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1389,8 +1389,8 @@ static void sbp_sense_mangle(struct sbp_target_request *req)
(sense[0] & 0x80) | /* valid */
((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
(sense[2] & 0x0f); /* sense_key */
- status[2] = se_cmd->scsi_asc; /* sense_code */
- status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
+ status[2] = 0; /* XXX sense_code */
+ status[3] = 0; /* XXX sense_qualifier */
/* information */
status[4] = sense[3];
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 3bb921345bce..cb1de1ecaaa6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -428,22 +428,6 @@ out:
return rc;
}
-static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
-{
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->fabric_name, alua_ascq);
-
- cmd->scsi_asc = 0x04;
- cmd->scsi_ascq = alua_ascq;
-}
-
static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
@@ -458,9 +442,9 @@ static inline void core_alua_state_nonoptimized(
cmd->alua_nonop_delay = nonop_delay_msecs;
}
-static inline int core_alua_state_lba_dependent(
+static inline sense_reason_t core_alua_state_lba_dependent(
struct se_cmd *cmd,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+ u16 tg_pt_gp_id)
{
struct se_device *dev = cmd->se_dev;
u64 segment_size, segment_mult, sectors, lba;
@@ -506,23 +490,19 @@ static inline int core_alua_state_lba_dependent(
}
if (!cur_map) {
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
lba_map_mem_list) {
- if (map_mem->lba_map_mem_alua_pg_id !=
- tg_pt_gp->tg_pt_gp_id)
+ if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
continue;
switch(map_mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_STANDBY:
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
case ALUA_ACCESS_STATE_UNAVAILABLE:
spin_unlock(&dev->t10_alua.lba_map_lock);
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
default:
break;
}
@@ -532,7 +512,7 @@ static inline int core_alua_state_lba_dependent(
return 0;
}
-static inline int core_alua_state_standby(
+static inline sense_reason_t core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -556,24 +536,21 @@ static inline int core_alua_state_standby(
case SAI_READ_CAPACITY_16:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
@@ -582,14 +559,13 @@ static inline int core_alua_state_standby(
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
return 0;
}
-static inline int core_alua_state_unavailable(
+static inline sense_reason_t core_alua_state_unavailable(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -606,30 +582,27 @@ static inline int core_alua_state_unavailable(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
return 0;
}
-static inline int core_alua_state_transition(
+static inline sense_reason_t core_alua_state_transition(
struct se_cmd *cmd,
unsigned char *cdb)
{
@@ -646,16 +619,14 @@ static inline int core_alua_state_transition(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
return 0;
@@ -674,6 +645,8 @@ target_alua_state_check(struct se_cmd *cmd)
struct se_lun *lun = cmd->se_lun;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int out_alua_state, nonop_delay_msecs;
+ u16 tg_pt_gp_id;
+ sense_reason_t rc = TCM_NO_SENSE;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
@@ -687,8 +660,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
- return TCM_CHECK_CONDITION_NOT_READY;
+ return TCM_ALUA_OFFLINE;
}
if (!lun->lun_tg_pt_gp)
@@ -698,8 +670,8 @@ target_alua_state_check(struct se_cmd *cmd)
tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- // XXX: keeps using tg_pt_gp witout reference after unlock
spin_unlock(&lun->lun_tg_pt_gp_lock);
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
@@ -715,20 +687,16 @@ target_alua_state_check(struct se_cmd *cmd)
core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
break;
case ALUA_ACCESS_STATE_STANDBY:
- if (core_alua_state_standby(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_standby(cmd, cdb);
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- if (core_alua_state_unavailable(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_unavailable(cmd, cdb);
break;
case ALUA_ACCESS_STATE_TRANSITION:
- if (core_alua_state_transition(cmd, cdb))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_transition(cmd, cdb);
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
- if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
- return TCM_CHECK_CONDITION_NOT_READY;
+ rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
@@ -738,10 +706,16 @@ target_alua_state_check(struct se_cmd *cmd)
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return TCM_INVALID_CDB_FIELD;
+ rc = TCM_INVALID_CDB_FIELD;
}
- return 0;
+ if (rc && rc != TCM_INVALID_CDB_FIELD) {
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
+ cmd->se_tfo->fabric_name, rc);
+ }
+
+ return rc;
}
/*
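After this rework the ALUA helpers return a sense_reason_t, and the ASC/ASCQ
pair is looked up from a static table when the sense buffer is built instead
of being stashed on the command by set_ascq(). A standalone sketch of that
mapping; the struct name and values mirror the transport hunk below but are
illustrative here:

#include <stdio.h>

enum { TCM_ALUA_TG_PT_STANDBY, TCM_ALUA_TG_PT_UNAVAILABLE };

struct sense_detail { unsigned char key, asc, ascq; };

/* One entry per sense_reason_t, replacing per-command scsi_asc/scsi_ascq */
static const struct sense_detail table[] = {
	[TCM_ALUA_TG_PT_STANDBY]     = { .key = 0x02, .asc = 0x04, .ascq = 0x0b },
	[TCM_ALUA_TG_PT_UNAVAILABLE] = { .key = 0x02, .asc = 0x04, .ascq = 0x0c },
};

int main(void)
{
	const struct sense_detail *sd = &table[TCM_ALUA_TG_PT_STANDBY];

	/* NOT READY (0x02), LUN not accessible (0x04), port in standby (0x0b) */
	printf("key=%#x asc=%#x ascq=%#x\n", sd->key, sd->asc, sd->ascq);
	return 0;
}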
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 44d9d028f716..4069a1edcfa3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -83,7 +83,7 @@ static int iblock_configure_device(struct se_device *dev)
struct blk_integrity *bi;
fmode_t mode;
unsigned int max_write_zeroes_sectors;
- int ret = -ENOMEM;
+ int ret;
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
pr_err("Missing udev_path= parameters for IBLOCK\n");
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2629d2ef3970..75ef52f008ff 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -620,17 +620,17 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
- }
-
- if (cdb[0] == MODE_SENSE_10) {
- if (!(buf[3] & 0x80))
- buf[3] |= 0x80;
} else {
- if (!(buf[2] & 0x80))
- buf[2] |= 0x80;
- }
+ if (cdb[0] == MODE_SENSE_10) {
+ if (!(buf[3] & 0x80))
+ buf[3] |= 0x80;
+ } else {
+ if (!(buf[2] & 0x80))
+ buf[2] |= 0x80;
+ }
- transport_kunmap_data_sg(cmd);
+ transport_kunmap_data_sg(cmd);
+ }
}
}
after_mode_sense:
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 26ceabe34de5..14c6f2bb1b01 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -736,8 +736,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ transport_generic_request_failure(cmd, cmd->sense_reason);
}
/*
@@ -855,7 +854,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
}
/* May be called from interrupt context so must not sleep. */
-void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
+ sense_reason_t sense_reason)
{
struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
int success, cpu;
@@ -865,6 +865,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
cmd->scsi_status = scsi_status;
+ cmd->sense_reason = sense_reason;
spin_lock_irqsave(&cmd->t_state_lock, flags);
switch (cmd->scsi_status) {
@@ -893,6 +894,14 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
queue_work_on(cpu, target_completion_wq, &cmd->work);
}
+EXPORT_SYMBOL(target_complete_cmd_with_sense);
+
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+{
+ target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
+ TCM_NO_SENSE);
+}
EXPORT_SYMBOL(target_complete_cmd);
void target_set_cmd_data_length(struct se_cmd *cmd, int length)
@@ -2003,7 +2012,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
- case TCM_CHECK_CONDITION_NOT_READY:
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
@@ -2013,6 +2021,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_TOO_MANY_SEGMENT_DESCS:
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
case TCM_INVALID_FIELD_IN_COMMAND_IU:
+ case TCM_ALUA_TG_PT_STANDBY:
+ case TCM_ALUA_TG_PT_UNAVAILABLE:
+ case TCM_ALUA_STATE_TRANSITION:
+ case TCM_ALUA_OFFLINE:
break;
case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -3277,9 +3289,6 @@ static const struct sense_detail sense_detail_table[] = {
[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
.key = UNIT_ATTENTION,
},
- [TCM_CHECK_CONDITION_NOT_READY] = {
- .key = NOT_READY,
- },
[TCM_MISCOMPARE_VERIFY] = {
.key = MISCOMPARE,
.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
@@ -3340,6 +3349,26 @@ static const struct sense_detail sense_detail_table[] = {
.asc = 0x0e,
.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
},
+ [TCM_ALUA_TG_PT_STANDBY] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+ },
+ [TCM_ALUA_TG_PT_UNAVAILABLE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+ },
+ [TCM_ALUA_STATE_TRANSITION] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+ },
+ [TCM_ALUA_OFFLINE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_OFFLINE,
+ },
};
/**
@@ -3374,11 +3403,8 @@ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
- } else if (sd->asc == 0) {
- WARN_ON_ONCE(cmd->scsi_asc == 0);
- asc = cmd->scsi_asc;
- ascq = cmd->scsi_ascq;
} else {
+ WARN_ON_ONCE(sd->asc == 0);
asc = sd->asc;
ascq = sd->ascq;
}
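With the new target_complete_cmd_with_sense(), a backend can report the
precise failure reason in one call instead of overloading scsi_status. A
hedged sketch of a caller; my_backend_complete is hypothetical (the xcopy
hunk below is the in-tree user):

/* The sense reason now travels with the command, so
 * target_complete_failure_work() no longer has to hardcode
 * LOGICAL_UNIT_COMMUNICATION_FAILURE for every failed command.
 */
static void my_backend_complete(struct se_cmd *cmd, int err)
{
	if (err)
		target_complete_cmd_with_sense(cmd, SAM_STAT_CHECK_CONDITION,
					       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);
}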
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index fbb6ffaddfbe..9f552f48084c 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -191,6 +191,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_KEEP_BUF 1
unsigned long flags;
};
@@ -1315,11 +1316,13 @@ unlock:
mutex_unlock(&udev->cmdr_lock);
}
-static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+ struct tcmu_cmd_entry *entry, bool keep_buf)
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
+ bool ret = true;
uint32_t read_len;
/*
@@ -1330,6 +1333,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
WARN_ON_ONCE(se_cmd);
goto out;
}
+ if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+ entry->hdr.cmd_id);
+ set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+ ret = false;
+ goto out;
+ }
list_del_init(&cmd->queue_entry);
@@ -1379,8 +1389,22 @@ done:
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
- tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
- tcmu_free_cmd(cmd);
+ if (!keep_buf) {
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ } else {
+ /*
+ * Keep this command after completion, since userspace still
+ * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+ * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
+ * a second completion later.
+ * Userspace can free the buffer later by writing the cmd_id
+ * to new action attribute free_kept_buf.
+ */
+ clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+ }
+ return ret;
}
static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
@@ -1432,6 +1456,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+ bool keep_buf;
/*
* Flush max. up to end of cmd ring since current entry might
@@ -1453,7 +1478,11 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
+ keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+ if (keep_buf)
+ cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+ else
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
@@ -1461,7 +1490,8 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
return false;
}
- tcmu_handle_completion(cmd, entry);
+ if (!tcmu_handle_completion(cmd, entry, keep_buf))
+ break;
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
@@ -1619,7 +1649,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+ test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
kmem_cache_free(tcmu_cmd_cache, cmd);
return 0;
}
@@ -1903,6 +1934,38 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
static int tcmu_release(struct uio_info *info, struct inode *inode)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+ struct tcmu_cmd *cmd;
+ unsigned long i;
+ bool freed = false;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ xa_for_each(&udev->commands, i, cmd) {
+		/* Cmds with KEEP_BUF set are no longer on the ring, but
+		 * userspace still holds the data buffer. If userspace closes,
+		 * we implicitly free these cmds and buffers, since after a new
+		 * open the (possibly new) userspace cannot find the cmd in the
+		 * ring and thus will never release the buffer by writing its
+		 * cmd_id to the free_kept_buf action attribute.
+ */
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+ continue;
+ pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+ cmd->cmd_id, udev->name);
+ freed = true;
+
+ xa_erase(&udev->commands, i);
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ }
+ /*
+	 * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+ */
+ if (freed && list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+ mutex_unlock(&udev->cmdr_lock);
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
@@ -2147,7 +2210,8 @@ static int tcmu_configure_device(struct se_device *dev)
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
TCMU_MAILBOX_FLAG_CAP_READ_LEN |
- TCMU_MAILBOX_FLAG_CAP_TMR;
+ TCMU_MAILBOX_FLAG_CAP_TMR |
+ TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
@@ -2279,12 +2343,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
xa_for_each(&udev->commands, i, cmd) {
- pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
- cmd->cmd_id, udev->name,
- test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
+ pr_debug("removing cmd %u on dev %s from ring %s\n",
+ cmd->cmd_id, udev->name,
+ test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+ "(is expired)" :
+ (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+ "(is keep buffer)" : ""));
xa_erase(&udev->commands, i);
- if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+ !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
cmd->se_cmd->priv = NULL;
@@ -2933,6 +3001,65 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *cmd;
+ u16 cmd_id;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(page, 0, &cmd_id);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ {
+ XA_STATE(xas, &udev->commands, cmd_id);
+
+ xas_lock(&xas);
+ cmd = xas_load(&xas);
+ if (!cmd) {
+ pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+ cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ /*
+	 * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+ */
+ if (list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+out_unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
@@ -2951,6 +3078,7 @@ static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
&tcmu_attr_block_dev,
&tcmu_attr_reset_ring,
+ &tcmu_attr_free_kept_buf,
NULL,
};
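From userspace, the new free_kept_buf action attribute is consumed roughly as
follows. A sketch under assumptions: the configfs mount point and device path
are examples and depend on the actual setup:

#include <stdio.h>

/* After handling a command completed with KEEP_BUF, release the kept data
 * buffer by writing its cmd_id to the free_kept_buf action attribute.
 */
static int free_kept_buf(const char *dev_name, unsigned int cmd_id)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/config/target/core/user_0/%s/action/free_kept_buf",
		 dev_name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%u\n", cmd_id);	/* kernel frees the kept data pages */
	return fclose(f);
}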
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 0f1319336f3e..d4fe7cb2bd00 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -674,12 +674,16 @@ static void target_xcopy_do_work(struct work_struct *work)
unsigned int max_sectors;
int rc = 0;
unsigned short nolb, max_nolb, copied_nolb = 0;
+ sense_reason_t sense_rc;
- if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
+ sense_rc = target_parse_xcopy_cmd(xop);
+ if (sense_rc != TCM_NO_SENSE)
goto err_free;
- if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
+ if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
+ sense_rc = TCM_INVALID_PARAMETER_LIST;
goto err_free;
+ }
src_dev = xop->src_dev;
dst_dev = xop->dst_dev;
@@ -762,20 +766,20 @@ static void target_xcopy_do_work(struct work_struct *work)
return;
out:
+ /*
+ * The XCOPY command was aborted after some data was transferred.
+ * Terminate command with CHECK CONDITION status, with the sense key
+ * set to COPY ABORTED.
+ */
+ sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
err_free:
kfree(xop);
- /*
- * Don't override an error scsi status if it has already been set
- */
- if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
- pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
- " CHECK_CONDITION -> sending response\n", rc);
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- }
- target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n",
+ rc, sense_rc);
+ target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
}
/*
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 5a86cffd78f6..4310cb342a9f 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -18,10 +18,10 @@
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <trace/events/thermal.h>
-#define HZ_PER_KHZ 1000
#define SCALE_ERROR_MITIGATION 100
/**
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index e4299ca3423c..c83ea5d04a1d 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -90,3 +90,12 @@ config INTEL_TCC_COOLING
Note that, on different platforms, the behavior might be different
on how fast the setting takes effect, and how much the CPU frequency
is reduced.
+
+config INTEL_MENLOW
+	tristate "Thermal Management driver for Intel Menlow platform"
+	depends on ACPI_THERMAL
+	help
+	  ACPI thermal management enhancement driver for the
+	  Intel Menlow platform.
+
+	  If unsure, say N.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index 5ff2afa388f7..960b56268b4a 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
+obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 823354a1a91a..19926beeb3b7 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -108,9 +108,12 @@ static struct attribute *imok_attr[] = {
NULL
};
+static const struct attribute_group imok_attribute_group = {
+ .attrs = imok_attr,
+};
+
static const struct attribute_group data_attribute_group = {
.bin_attrs = data_attributes,
- .attrs = imok_attr,
};
static ssize_t available_uuids_show(struct device *dev,
@@ -522,6 +525,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
if (result)
goto free_rel_misc;
+ if (acpi_has_method(priv->adev->handle, "IMOK")) {
+ result = sysfs_create_group(&pdev->dev.kobj, &imok_attribute_group);
+ if (result)
+ goto free_imok;
+ }
+
if (priv->data_vault) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
@@ -545,6 +554,8 @@ free_sysfs:
}
free_uuid:
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+free_imok:
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
free_rel_misc:
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
@@ -573,6 +584,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (priv->data_vault)
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
thermal_zone_device_unregister(priv->thermal);
kfree(priv->data_vault);
kfree(priv->trts);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/thermal/intel/intel_menlow.c
index 101d7e791a13..101d7e791a13 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/thermal/intel/intel_menlow.c
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index b0eb5ece9243..a5b58ea89cc6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -528,7 +528,7 @@ static int start_power_clamp(void)
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
- get_online_cpus();
+ cpus_read_lock();
/* prefer BSP */
control_cpu = 0;
@@ -542,7 +542,7 @@ static int start_power_clamp(void)
for_each_online_cpu(cpu) {
start_power_clamp_worker(cpu);
}
- put_online_cpus();
+ cpus_read_unlock();
return 0;
}
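get_online_cpus()/put_online_cpus() were thin wrappers over the CPU-hotplug
read lock and are being retired tree-wide; the replacement is a direct
rename, sketched here as a fragment:

	cpus_read_lock();		/* block CPU hotplug */
	for_each_online_cpu(cpu)
		start_power_clamp_worker(cpu);
	cpus_read_unlock();		/* allow hotplug again */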
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 8ec10d55d421..cd80c7db4073 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -79,6 +79,8 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
{}
};
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 8d5ac2df26dc..7d942f71e532 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -31,3 +31,13 @@ config QCOM_SPMI_TEMP_ALARM
trip points. The temperature reported by the thermal sensor reflects the
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+
+config QCOM_LMH
+ tristate "Qualcomm Limits Management Hardware"
+ depends on ARCH_QCOM
+ help
+	  This enables initialization of the Qualcomm Limits Management
+	  Hardware (LMh). LMh allows hardware-enforced mitigation for CPUs based on
+	  input from temperature and current sensors. On many newer Qualcomm SoCs
+	  LMh is configured in the firmware and this feature need not be enabled.
+	  However, on certain SoCs like sdm845, LMh has to be configured from the kernel.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 252ea7d9da0b..0fa2512042e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -5,3 +5,4 @@ qcom_tsens-y += tsens.o tsens-v2.o tsens-v1.o tsens-v0_1.o \
tsens-8960.o
obj-$(CONFIG_QCOM_SPMI_ADC_TM5) += qcom-spmi-adc-tm5.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
+obj-$(CONFIG_QCOM_LMH) += lmh.o
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
new file mode 100644
index 000000000000..eafa7526eb8b
--- /dev/null
+++ b/drivers/thermal/qcom/lmh.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/qcom_scm.h>
+
+#define LMH_NODE_DCVS 0x44435653
+#define LMH_CLUSTER0_NODE_ID 0x6370302D
+#define LMH_CLUSTER1_NODE_ID 0x6370312D
+
+#define LMH_SUB_FN_THERMAL 0x54484D4C
+#define LMH_SUB_FN_CRNT 0x43524E54
+#define LMH_SUB_FN_REL 0x52454C00
+#define LMH_SUB_FN_BCL 0x42434C00
+
+#define LMH_ALGO_MODE_ENABLE 0x454E424C
+#define LMH_TH_HI_THRESHOLD 0x48494748
+#define LMH_TH_LOW_THRESHOLD 0x4C4F5700
+#define LMH_TH_ARM_THRESHOLD 0x41524D00
+
+#define LMH_REG_DCVS_INTR_CLR 0x8
+
+struct lmh_hw_data {
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+};
+
+static irqreturn_t lmh_handle_irq(int hw_irq, void *data)
+{
+ struct lmh_hw_data *lmh_data = data;
+ int irq = irq_find_mapping(lmh_data->domain, 0);
+
+ /* Call the cpufreq driver to handle the interrupt */
+ if (irq)
+ generic_handle_irq(irq);
+
+ return 0;
+}
+
+static void lmh_enable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ /* Clear the existing interrupt */
+ writel(0xff, lmh_data->base + LMH_REG_DCVS_INTR_CLR);
+ enable_irq(lmh_data->irq);
+}
+
+static void lmh_disable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ disable_irq_nosync(lmh_data->irq);
+}
+
+static struct irq_chip lmh_irq_chip = {
+ .name = "lmh",
+ .irq_enable = lmh_enable_interrupt,
+ .irq_disable = lmh_disable_interrupt
+};
+
+static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct lmh_hw_data *lmh_data = d->host_data;
+
+ irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, lmh_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lmh_irq_ops = {
+ .map = lmh_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int lmh_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *cpu_node;
+ struct lmh_hw_data *lmh_data;
+ int temp_low, temp_high, temp_arm, cpu_id, ret;
+ u32 node_id;
+
+ lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ if (!lmh_data)
+ return -ENOMEM;
+
+ lmh_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lmh_data->base))
+ return PTR_ERR(lmh_data->base);
+
+ cpu_node = of_parse_phandle(np, "cpus", 0);
+ if (!cpu_node)
+ return -EINVAL;
+ cpu_id = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-high-millicelsius", &temp_high);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-high-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-low-millicelsius", &temp_low);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-low-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-arm-millicelsius", &temp_arm);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-arm-millicelsius property\n");
+ return ret;
+ }
+
+ /*
+	 * Only sdm845 has LMh hardware currently enabled from the HLOS. If this is
+	 * needed for other platforms, revisit this to check whether the
+	 * <cpu-id, node-id> pair should be part of a DT match table.
+ */
+ if (cpu_id == 0) {
+ node_id = LMH_CLUSTER0_NODE_ID;
+ } else if (cpu_id == 4) {
+ node_id = LMH_CLUSTER1_NODE_ID;
+ } else {
+ dev_err(dev, "Wrong CPU id associated with LMh node\n");
+ return -EINVAL;
+ }
+
+ if (!qcom_scm_lmh_dcvsh_available())
+ return -EINVAL;
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling current subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_profile_change(0x1);
+ if (ret) {
+ dev_err(dev, "Error %d changing profile\n", ret);
+ return ret;
+ }
+
+ /* Set default thermal trips */
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_ARM_THRESHOLD, temp_arm,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+		dev_err(dev, "Error %d setting thermal ARM threshold\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_HI_THRESHOLD, temp_high,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+		dev_err(dev, "Error %d setting thermal HI threshold\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_LOW_THRESHOLD, temp_low,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+		dev_err(dev, "Error %d setting thermal LOW threshold\n", ret);
+ return ret;
+ }
+
+ lmh_data->irq = platform_get_irq(pdev, 0);
+ lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data);
+ if (!lmh_data->domain) {
+ dev_err(dev, "Error adding irq_domain\n");
+ return -EINVAL;
+ }
+
+ /* Disable the irq and let cpufreq enable it when ready to handle the interrupt */
+ irq_set_status_flags(lmh_data->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(dev, lmh_data->irq, lmh_handle_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "lmh-irq", lmh_data);
+ if (ret) {
+ dev_err(dev, "Error %d registering irq %x\n", ret, lmh_data->irq);
+ irq_domain_remove(lmh_data->domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id lmh_table[] = {
+ { .compatible = "qcom,sdm845-lmh", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lmh_table);
+
+static struct platform_driver lmh_driver = {
+ .probe = lmh_probe,
+ .driver = {
+ .name = "qcom-lmh",
+ .of_match_table = lmh_table,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(lmh_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM LMh driver");
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 232fd0b33325..8494cc04aa21 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -359,6 +359,12 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
&adc_tm->channels[i],
&adc_tm5_ops);
if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) == -ENODEV) {
+ dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
+ adc_tm->channels[i].channel);
+ continue;
+ }
+
dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n",
adc_tm->channels[i].channel, PTR_ERR(tzd));
return PTR_ERR(tzd);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index fdf16aa34eb4..85228d308dd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -84,7 +84,7 @@ struct rcar_gen3_thermal_tsc {
struct thermal_zone_device *zone;
struct equation_coefs coef;
int tj_t;
- int id; /* thermal channel id */
+ unsigned int id; /* thermal channel id */
};
struct rcar_gen3_thermal_priv {
@@ -190,10 +190,64 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
+static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
+ int mcelsius)
+{
+ int celsius, val;
+
+ celsius = DIV_ROUND_CLOSEST(mcelsius, 1000);
+ if (celsius <= INT_FIXPT(tsc->tj_t))
+ val = celsius * tsc->coef.a1 + tsc->coef.b1;
+ else
+ val = celsius * tsc->coef.a2 + tsc->coef.b2;
+
+ return INT_FIXPT(val);
+}
+
+static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+{
+ struct rcar_gen3_thermal_tsc *tsc = devdata;
+ u32 irqmsk = 0;
+
+ if (low != -INT_MAX) {
+ irqmsk |= IRQ_TEMPD1;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
+ }
+
+ if (high != INT_MAX) {
+ irqmsk |= IRQ_TEMP2;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
+ }
+
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, irqmsk);
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
+ .set_trips = rcar_gen3_thermal_set_trips,
};
+static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
+{
+ struct rcar_gen3_thermal_priv *priv = data;
+ unsigned int i;
+ u32 status;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
+ rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
+ if (status)
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
static const struct soc_device_attribute r8a7795es1[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
@@ -210,6 +264,9 @@ static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR,
CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN);
@@ -235,6 +292,9 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR);
reg_val |= THCTR_THSST;
@@ -303,6 +363,34 @@ static void rcar_gen3_hwmon_action(void *data)
thermal_remove_hwmon_sysfs(zone);
}
+static int rcar_gen3_thermal_request_irqs(struct rcar_gen3_thermal_priv *priv,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i;
+ char *irqname;
+ int ret, irq;
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s:ch%d",
+ dev_name(dev), i);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
struct rcar_gen3_thermal_priv *priv;
@@ -310,7 +398,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
const int *ths_tj_1 = of_device_get_match_data(dev);
struct resource *res;
struct thermal_zone_device *zone;
- int ret, i;
+ unsigned int i;
+ int ret;
/* default values if FUSEs are missing */
/* TODO: Read values from hardware on supported platforms */
@@ -326,6 +415,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ if (rcar_gen3_thermal_request_irqs(priv, pdev))
+ rcar_gen3_tz_of_ops.set_trips = NULL;
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -351,9 +443,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
- priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
-
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
@@ -363,6 +452,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
+ priv->thermal_init(tsc);
+ rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
+
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -376,7 +468,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
+ dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
}
priv->num_tscs = i;
@@ -401,8 +493,12 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+ struct thermal_zone_device *zone = tsc->zone;
priv->thermal_init(tsc);
+ if (zone->ops->set_trips)
+ rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip,
+ zone->prev_high_trip);
}
return 0;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index e9a90bc23b11..f4ab4c5b4b62 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1073,6 +1073,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
dev_err(&pdev->dev, "Failed to get sclk\n");
+ ret = PTR_ERR(data->sclk);
goto err_clk;
} else {
ret = clk_prepare_enable(data->sclk);
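For context on the one-line exynos_tmu fix above: without the PTR_ERR() assignment the error path returned whatever ret held from the previous successful call, so probe reported success despite the missing clock. A minimal illustration of the bug class, with hypothetical helpers:

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-ins for two consecutive resource lookups */
static int get_first(void) { return 0; }		/* succeeds */
static int get_second(void) { return -ENOENT; }		/* fails */

static int probe_sketch(void)
{
	int ret = get_first();

	if (ret)
		return ret;

	if (get_second() < 0) {
		ret = get_second();	/* the fix: propagate the error */
		goto err;
	}
	return 0;

err:
	return ret;	/* without the assignment this would still be 0 */
}

int main(void)
{
	printf("%d\n", probe_sketch());	/* prints -2, not 0 */
	return 0;
}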
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index 46c2215867cd..cfa41d87a794 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVIDIA Tegra thermal drivers"
-depends on ARCH_TEGRA
+depends on ARCH_TEGRA || COMPILE_TEST
config TEGRA_SOCTHERM
tristate "Tegra SOCTHERM thermal management"
@@ -18,4 +18,11 @@ config TEGRA_BPMP_THERMAL
Enable this option for support for sensing system temperature of NVIDIA
Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+config TEGRA30_TSENSOR
+ tristate "Tegra30 Thermal Sensor"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ help
+ Enable this option to support thermal management of the NVIDIA
+ Tegra30 system-on-chip.
+
endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index 0f2b66edf0d2..eb27d194c583 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
+obj-$(CONFIG_TEGRA30_TSENSOR) += tegra30-tsensor.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 8e303e9d1dc0..210325f92559 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -450,8 +450,8 @@ static int enforce_temp_range(struct device *dev, int trip_temp)
temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
if (temp != trip_temp)
- dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
- trip_temp, temp);
+ dev_dbg(dev, "soctherm: trip temperature %d forced to %d\n",
+ trip_temp, temp);
return temp;
}
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
new file mode 100644
index 000000000000..9b6b693cbcf8
--- /dev/null
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tegra30 SoC Thermal Sensor driver
+ *
+ * Based on downstream HWMON driver from NVIDIA.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2021 GRATE-DRIVER project
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+#define TSENSOR_SENSOR0_CONFIG0 0x0
+#define TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP BIT(0)
+#define TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN BIT(1)
+#define TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN BIT(2)
+#define TSENSOR_SENSOR0_CONFIG0_DVFS_EN BIT(3)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN BIT(4)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN BIT(5)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN BIT(6)
+#define TSENSOR_SENSOR0_CONFIG0_M GENMASK(23, 8)
+#define TSENSOR_SENSOR0_CONFIG0_N GENMASK(31, 24)
+
+#define TSENSOR_SENSOR0_CONFIG1 0x8
+#define TSENSOR_SENSOR0_CONFIG1_TH1 GENMASK(15, 0)
+#define TSENSOR_SENSOR0_CONFIG1_TH2 GENMASK(31, 16)
+
+#define TSENSOR_SENSOR0_CONFIG2 0xc
+#define TSENSOR_SENSOR0_CONFIG2_TH3 GENMASK(15, 0)
+
+#define TSENSOR_SENSOR0_STATUS0 0x18
+#define TSENSOR_SENSOR0_STATUS0_STATE GENMASK(2, 0)
+#define TSENSOR_SENSOR0_STATUS0_INTR BIT(8)
+#define TSENSOR_SENSOR0_STATUS0_CURRENT_VALID BIT(9)
+
+#define TSENSOR_SENSOR0_TS_STATUS1 0x1c
+#define TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT GENMASK(31, 16)
+
+#define TEGRA30_FUSE_TEST_PROG_VER 0x28
+
+#define TEGRA30_FUSE_TSENSOR_CALIB 0x98
+#define TEGRA30_FUSE_TSENSOR_CALIB_LOW GENMASK(15, 0)
+#define TEGRA30_FUSE_TSENSOR_CALIB_HIGH GENMASK(31, 16)
+
+#define TEGRA30_FUSE_SPARE_BIT 0x144
+
+struct tegra_tsensor;
+
+struct tegra_tsensor_calibration_data {
+ int a, b, m, n, p, r;
+};
+
+struct tegra_tsensor_channel {
+ void __iomem *regs;
+ unsigned int id;
+ struct tegra_tsensor *ts;
+ struct thermal_zone_device *tzd;
+};
+
+struct tegra_tsensor {
+ void __iomem *regs;
+ bool swap_channels;
+ struct clk *clk;
+ struct device *dev;
+ struct reset_control *rst;
+ struct tegra_tsensor_channel ch[2];
+ struct tegra_tsensor_calibration_data calib;
+};
+
+static int tegra_tsensor_hw_enable(const struct tegra_tsensor *ts)
+{
+ u32 val;
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(ts->clk);
+ if (err) {
+ dev_err(ts->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ fsleep(1000);
+
+ err = reset_control_deassert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to deassert hardware reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ /*
+ * Sensors are enabled after reset by default, but they do not start
+ * gauging until the clock counter is programmed.
+ *
+ * M: number of reference clock pulses after which every
+ * temperature / voltage measurement is made
+ *
+ * N: number of reference clock counts for which the counter runs
+ */
+ val = FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_M, 12500);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_N, 255);
+
+ /* apply the same configuration to both channels */
+ writel_relaxed(val, ts->regs + 0x40 + TSENSOR_SENSOR0_CONFIG0);
+ writel_relaxed(val, ts->regs + 0x80 + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(ts->clk);
+
+ return err;
+}
+
+static int tegra_tsensor_hw_disable(const struct tegra_tsensor *ts)
+{
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(ts->clk);
+
+ return 0;
+}
+
+static void devm_tegra_tsensor_hw_disable(void *data)
+{
+ const struct tegra_tsensor *ts = data;
+
+ tegra_tsensor_hw_disable(ts);
+}
+
+static int tegra_tsensor_get_temp(void *data, int *temp)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ int err, c1, c2, c3, c4, counter;
+ u32 val;
+
+ /*
+ * The counter will be invalid if the hardware is misprogrammed or if
+ * not enough time has passed since the sensor was enabled.
+ */
+ err = readl_relaxed_poll_timeout(tsc->regs + TSENSOR_SENSOR0_STATUS0, val,
+ val & TSENSOR_SENSOR0_STATUS0_CURRENT_VALID,
+ 21 * USEC_PER_MSEC,
+ 21 * USEC_PER_MSEC * 50);
+ if (err) {
+ dev_err_once(ts->dev, "ch%u: counter invalid\n", tsc->id);
+ return err;
+ }
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_TS_STATUS1);
+ counter = FIELD_GET(TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT, val);
+
+ /*
+ * This shouldn't happen with a valid counter status; nevertheless,
+ * let's verify the value since it lives in a register separate from
+ * the status register.
+ */
+ if (counter == 0xffff) {
+ dev_err_once(ts->dev, "ch%u: counter overflow\n", tsc->id);
+ return -EINVAL;
+ }
+
+ /*
+ * temperature = a * counter + b
+ * temperature = m * (temperature ^ 2) + n * temperature + p
+ */
+ c1 = DIV_ROUND_CLOSEST(ts->calib.a * counter + ts->calib.b, 1000000);
+ c1 = c1 ?: 1;
+ c2 = DIV_ROUND_CLOSEST(ts->calib.p, c1);
+ c3 = c1 * ts->calib.m;
+ c4 = ts->calib.n;
+
+ *temp = DIV_ROUND_CLOSEST(c1 * (c2 + c3 + c4), 1000);
+
+ return 0;
+}
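The c1..c4 factoring above is just the quadratic m*t^2 + n*t + p evaluated at the linear estimate t = c1, rearranged to stay within 32-bit arithmetic. A standalone check using the non-A01 coefficients quoted later in this file, with plain division standing in for DIV_ROUND_CLOSEST:

#include <stdio.h>

/* non-A01 calibration constants from tegra_tsensor_nvmem_setup() below */
static const long long m = -3512, n = 1528943, p = -11100000;

int main(void)
{
	long long c1 = 40;	/* linear temperature estimate, Celsius */
	long long factored = c1 * (p / c1 + c1 * m + n) / 1000;
	long long direct = (m * c1 * c1 + n * c1 + p) / 1000;

	/* both print 44438, i.e. ~44.4 C in millicelsius */
	printf("%lld %lld\n", factored, direct);
	return 0;
}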
+
+static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int temp)
+{
+ int c1, c2;
+
+ c1 = DIV_ROUND_CLOSEST(ts->calib.p - temp * 1000, ts->calib.m);
+ c2 = -ts->calib.r - int_sqrt(ts->calib.r * ts->calib.r - c1);
+
+ return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
+}
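tegra_tsensor_temp_to_counter() inverts that quadratic via the reduced form t = -r - sqrt(r^2 - c1), with r = n / (2m) and c1 = (p - T) / m for T in microcelsius, then undoes the linear calibration. A sketch of the quadratic step for an 85C trip, again with the non-A01 coefficients and libm sqrt standing in for int_sqrt():

#include <math.h>
#include <stdio.h>

static const double m = -3512, n = 1528943, p = -11100000;

int main(void)
{
	double T = 85000 * 1000.0;		/* 85 C trip, microcelsius */
	double r = n / (2 * m);			/* ~ -217.7 */
	double c1 = (p - T) / m;
	double t = -r - sqrt(r * r - c1);

	printf("%.1f\n", t);	/* ~ 76.2 C linear estimate */
	return 0;
}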
+
+static int tegra_tsensor_set_trips(void *data, int low, int high)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ u32 val;
+
+ /*
+ * TSENSOR doesn't trigger interrupt on the "low" temperature breach,
+ * hence bail out if high temperature is unspecified.
+ */
+ if (high == INT_MAX)
+ return 0;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH1;
+
+ high = tegra_tsensor_temp_to_counter(ts, high);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH1, high);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+ .get_temp = tegra_tsensor_get_temp,
+ .set_trips = tegra_tsensor_set_trips,
+};
+
+static bool
+tegra_tsensor_handle_channel_interrupt(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ u32 val;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_STATUS0);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_STATUS0);
+
+ if (FIELD_GET(TSENSOR_SENSOR0_STATUS0_STATE, val) == 5)
+ dev_err_ratelimited(ts->dev, "ch%u: counter overflowed\n", id);
+
+ if (!FIELD_GET(TSENSOR_SENSOR0_STATUS0_INTR, val))
+ return false;
+
+ thermal_zone_device_update(tsc->tzd, THERMAL_EVENT_UNSPECIFIED);
+
+ return true;
+}
+
+static irqreturn_t tegra_tsensor_isr(int irq, void *data)
+{
+ const struct tegra_tsensor *ts = data;
+ bool handled = false;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++)
+ handled |= tegra_tsensor_handle_channel_interrupt(ts, i);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
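Returning IRQ_NONE when neither channel raised the interrupt lets the IRQ core detect spurious or runaway interrupts on the line. The accumulation idiom in miniature, with a plain array as a hypothetical stand-in for the INTR status bit:

#include <stdbool.h>
#include <stdio.h>

static bool channel_pending[2] = { false, true };

static bool handle_channel(unsigned int i)
{
	bool pending = channel_pending[i];

	channel_pending[i] = false;	/* "ack" the status bit */
	return pending;
}

int main(void)
{
	bool handled = false;
	unsigned int i;

	for (i = 0; i < 2; i++)
		handled |= handle_channel(i);

	printf("%s\n", handled ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}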
+
+static int tegra_tsensor_disable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ u32 val;
+ int err;
+
+ if (!tzd)
+ goto stop_channel;
+
+ err = thermal_zone_device_disable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to disable zone: %d\n", id, err);
+ return err;
+ }
+
+stop_channel:
+ /* stop channel gracefully */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+}
+
+static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
+ int *hot_trip, int *crit_trip)
+{
+ unsigned int i;
+
+ /*
+ * 90C is the maximum critical temperature across all Tegra30 SoC
+ * variants; use it for the default trips if the device tree does
+ * not specify them.
+ */
+ *hot_trip = 85000;
+ *crit_trip = 90000;
+
+ for (i = 0; i < tzd->trips; i++) {
+ enum thermal_trip_type type;
+ int trip_temp;
+
+ tzd->ops->get_trip_temp(tzd, i, &trip_temp);
+ tzd->ops->get_trip_type(tzd, i, &type);
+
+ if (type == THERMAL_TRIP_HOT)
+ *hot_trip = trip_temp;
+
+ if (type == THERMAL_TRIP_CRITICAL)
+ *crit_trip = trip_temp;
+ }
+
+ /* clamp hardware trips to the calibration limits */
+ *hot_trip = clamp(*hot_trip, 25000, 90000);
+
+ /*
+ * The kernel performs a normal system shutdown once it sees that
+ * the critical temperature is breached, hence set the hardware
+ * limit 5C higher so that the system can shut down gracefully
+ * before the signal is sent to the Power Management controller.
+ */
+ *crit_trip = clamp(*crit_trip + 5000, 25000, 90000);
+}
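Worked through the clamps above: a 90C critical trip from the device tree becomes min(90000 + 5000, 90000) = 90000, so the calibration ceiling wins, while a 70C trip becomes 75000:

#include <stdio.h>

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	printf("%d\n", clamp_int(90000 + 5000, 25000, 90000));	/* 90000 */
	printf("%d\n", clamp_int(70000 + 5000, 25000, 90000));	/* 75000 */
	return 0;
}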
+
+static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ int err, hot_trip = 0, crit_trip = 0;
+ u32 val;
+
+ if (!tzd) {
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+ }
+
+ tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip);
+
+ /* prevent potential racing with tegra_tsensor_set_trips() */
+ mutex_lock(&tzd->lock);
+
+ dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n",
+ id, DIV_ROUND_CLOSEST(crit_trip, 1000));
+
+ hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip);
+ crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip);
+
+ /* program LEVEL2 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH2;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ /* program LEVEL3 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+ val &= ~TSENSOR_SENSOR0_CONFIG2_TH3;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+
+ /*
+ * Enable sensor, emergency shutdown, interrupts for level 1/2/3
+ * breaches and counter overflow condition.
+ *
+ * Disable DIV2 throttle for now since we need to figure out how
+ * to integrate it properly with the thermal framework.
+ *
+ * Thermal levels supported by hardware:
+ *
+ * Level 0 = cold
+ * Level 1 = passive cooling (cpufreq DVFS)
+ * Level 2 = passive cooling assisted by hardware (DIV2)
+ * Level 3 = emergency shutdown assisted by hardware (PMC)
+ */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_DVFS_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN, 0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ mutex_unlock(&tzd->lock);
+
+ err = thermal_zone_device_enable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to enable zone: %d\n", id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool tegra_tsensor_fuse_read_spare(unsigned int spare)
+{
+ u32 val = 0;
+
+ tegra_fuse_readl(TEGRA30_FUSE_SPARE_BIT + spare * 4, &val);
+
+ return !!val;
+}
+
+static int tegra_tsensor_nvmem_setup(struct tegra_tsensor *ts)
+{
+ u32 i, ate_ver = 0, cal = 0, t1_25C = 0, t2_90C = 0;
+ int err, c1_25C, c2_90C;
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TEST_PROG_VER, &ate_ver);
+ if (err) {
+ dev_err_probe(ts->dev, err, "failed to get ATE version\n");
+ return err;
+ }
+
+ if (ate_ver < 8) {
+ dev_info(ts->dev, "unsupported ATE version: %u\n", ate_ver);
+ return -ENODEV;
+ }
+
+ /*
+ * We have two TSENSOR channels in two different spots on the SoC.
+ * The second channel provides more accurate data on older SoC
+ * versions, so use it as the primary channel.
+ */
+ if (ate_ver <= 21) {
+ dev_info_once(ts->dev,
+ "older ATE version detected, channels remapped\n");
+ ts->swap_channels = true;
+ }
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TSENSOR_CALIB, &cal);
+ if (err) {
+ dev_err(ts->dev, "failed to get calibration data: %d\n", err);
+ return err;
+ }
+
+ /* get calibrated counter values for 25C/90C thresholds */
+ c1_25C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_LOW, cal);
+ c2_90C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_HIGH, cal);
+
+ /* and calibrated temperatures corresponding to the counter values */
+ for (i = 0; i < 7; i++) {
+ t1_25C |= tegra_tsensor_fuse_read_spare(14 + i) << i;
+ t1_25C |= tegra_tsensor_fuse_read_spare(21 + i) << i;
+
+ t2_90C |= tegra_tsensor_fuse_read_spare(0 + i) << i;
+ t2_90C |= tegra_tsensor_fuse_read_spare(7 + i) << i;
+ }
+
+ if (c2_90C - c1_25C <= t2_90C - t1_25C) {
+ dev_err(ts->dev, "invalid calibration data: %d %d %u %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C);
+ return -EINVAL;
+ }
+
+ /* all calibration coefficients are premultiplied by 1000000 */
+
+ ts->calib.a = DIV_ROUND_CLOSEST((t2_90C - t1_25C) * 1000000,
+ (c2_90C - c1_25C));
+
+ ts->calib.b = t1_25C * 1000000 - ts->calib.a * c1_25C;
+
+ if (tegra_sku_info.revision == TEGRA_REVISION_A01) {
+ ts->calib.m = -2775;
+ ts->calib.n = 1338811;
+ ts->calib.p = -7300000;
+ } else {
+ ts->calib.m = -3512;
+ ts->calib.n = 1528943;
+ ts->calib.p = -11100000;
+ }
+
+ /* except for r, the coefficient of the reduced quadratic equation */
+ ts->calib.r = DIV_ROUND_CLOSEST(ts->calib.n, ts->calib.m * 2);
+
+ dev_info_once(ts->dev,
+ "calibration: %d %d %u %u ATE ver: %u SoC rev: %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C, ate_ver,
+ tegra_sku_info.revision);
+
+ return 0;
+}
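The two-point calibration above is a straight line through (c1_25C, 25C) and (c2_90C, 90C), premultiplied by 1000000 to keep integer precision. A standalone sketch with hypothetical fuse values, not from real silicon:

#include <stdio.h>

int main(void)
{
	/* hypothetical fuse readings, not from real silicon */
	int c1_25C = 2150, c2_90C = 3250;	/* calibrated counter values */
	int t1_25C = 25, t2_90C = 90;		/* their temperatures, C */

	int a = (t2_90C - t1_25C) * 1000000 / (c2_90C - c1_25C);
	int b = t1_25C * 1000000 - a * c1_25C;

	/* feeding c1_25C back through "a * counter + b" recovers 25 C */
	printf("a=%d b=%d check=%d\n", a, b, (a * c1_25C + b) / 1000000);
	return 0;
}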
+
+static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ unsigned int hw_id = ts->swap_channels ? !id : id;
+
+ tsc->ts = ts;
+ tsc->id = id;
+ tsc->regs = ts->regs + 0x40 * (hw_id + 1);
+
+ tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops);
+ if (IS_ERR(tsc->tzd)) {
+ if (PTR_ERR(tsc->tzd) != -ENODEV)
+ return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
+ "failed to register thermal zone\n");
+
+ /*
+ * It's okay if the sensor isn't assigned to any thermal zone
+ * in the device tree.
+ */
+ tsc->tzd = NULL;
+ return 0;
+ }
+
+ if (devm_thermal_add_hwmon_sysfs(tsc->tzd))
+ dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n");
+
+ return 0;
+}
+
+static int tegra_tsensor_probe(struct platform_device *pdev)
+{
+ struct tegra_tsensor *ts;
+ unsigned int i;
+ int err, irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ts->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ts);
+
+ ts->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ts->regs))
+ return PTR_ERR(ts->regs);
+
+ ts->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ts->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->clk),
+ "failed to get clock\n");
+
+ ts->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(ts->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->rst),
+ "failed to get reset control\n");
+
+ err = tegra_tsensor_nvmem_setup(ts);
+ if (err)
+ return err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ devm_tegra_tsensor_hw_disable,
+ ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_register_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ tegra_tsensor_isr, IRQF_ONESHOT,
+ "tegra_tsensor", ts);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request interrupt\n");
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_tsensor_suspend(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_disable_hw_channel(ts, i);
+ if (err)
+ goto enable_channel;
+ }
+
+ err = tegra_tsensor_hw_disable(ts);
+ if (err)
+ goto enable_channel;
+
+ return 0;
+
+enable_channel:
+ while (i--)
+ tegra_tsensor_enable_hw_channel(ts, i);
+
+ return err;
+}
+
+static int __maybe_unused tegra_tsensor_resume(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_tsensor_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_tsensor_suspend,
+ tegra_tsensor_resume)
+};
+
+static const struct of_device_id tegra_tsensor_of_match[] = {
+ { .compatible = "nvidia,tegra30-tsensor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_tsensor_of_match);
+
+static struct platform_driver tegra_tsensor_driver = {
+ .probe = tegra_tsensor_probe,
+ .driver = {
+ .name = "tegra30-tsensor",
+ .of_match_table = tegra_tsensor_of_match,
+ .pm = &tegra_tsensor_pm_ops,
+ },
+};
+module_platform_driver(tegra_tsensor_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra30 Thermal Sensor driver");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index eca0ef311bde..1f69bab236ee 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -2206,23 +2206,13 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
tb_tunnel_free(tunnel2);
}
-static void tb_test_credit_alloc_all(struct kunit *test)
+static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
{
- struct tb_port *up, *down, *in, *out, *nhi, *port;
- struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
- struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
- struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *pcie_tunnel;
struct tb_path *path;
- /*
- * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
- * device. Expectation is that all these can be established with
- * the default credit allocation found in Intel hardware.
- */
-
- host = alloc_host_usb4(test);
- dev = alloc_dev_usb4(test, host, 0x1, true);
-
down = &host->ports[8];
up = &dev->ports[9];
pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
@@ -2243,9 +2233,18 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+ return pcie_tunnel;
+}
+
+static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
+{
+ struct tb_port *in, *out;
+ struct tb_tunnel *dp_tunnel1;
+ struct tb_path *path;
+
in = &host->ports[5];
out = &dev->ports[13];
-
dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2271,9 +2270,18 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+ return dp_tunnel1;
+}
+
+static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
+{
+ struct tb_port *in, *out;
+ struct tb_tunnel *dp_tunnel2;
+ struct tb_path *path;
+
in = &host->ports[6];
out = &dev->ports[14];
-
dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2299,6 +2307,16 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+ return dp_tunnel2;
+}
+
+static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
+{
+ struct tb_port *up, *down;
+ struct tb_tunnel *usb3_tunnel;
+ struct tb_path *path;
+
down = &host->ports[12];
up = &dev->ports[16];
usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
@@ -2319,9 +2337,18 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+ return usb3_tunnel;
+}
+
+static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
+{
+ struct tb_port *nhi, *port;
+ struct tb_tunnel *dma_tunnel1;
+ struct tb_path *path;
+
nhi = &host->ports[7];
port = &dev->ports[3];
-
dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_TRUE(test, dma_tunnel1 != NULL);
KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
@@ -2340,6 +2367,18 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+ return dma_tunnel1;
+}
+
+static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
+ struct tb_switch *host, struct tb_switch *dev)
+{
+ struct tb_port *nhi, *port;
+ struct tb_tunnel *dma_tunnel2;
+ struct tb_path *path;
+
+ nhi = &host->ports[7];
+ port = &dev->ports[3];
dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
KUNIT_ASSERT_TRUE(test, dma_tunnel2 != NULL);
KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
@@ -2358,6 +2397,31 @@ static void tb_test_credit_alloc_all(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+ return dma_tunnel2;
+}
+
+static void tb_test_credit_alloc_all(struct kunit *test)
+{
+ struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
+ struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
+ struct tb_switch *host, *dev;
+
+ /*
+ * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
+ * device. The expectation is that all of these can be established
+ * with the default credit allocation found in Intel hardware.
+ */
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
+ dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
+ dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
+ usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
+ dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
+ dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
+
tb_tunnel_free(dma_tunnel2);
tb_tunnel_free(dma_tunnel1);
tb_tunnel_free(usb3_tunnel);
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index be640d9863cd..643dfbcc43f9 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -496,7 +496,7 @@ static int __init mux_probe(struct parisc_device *dev)
return 0;
}
-static int __exit mux_remove(struct parisc_device *dev)
+static void __exit mux_remove(struct parisc_device *dev)
{
int i, j;
int port_count = (long)dev_get_drvdata(&dev->dev);
@@ -518,7 +518,6 @@ static int __exit mux_remove(struct parisc_device *dev)
}
release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
- return 0;
}
/* Hack. This idea was taken from the 8250_gsc.c on how to properly order
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 3c80bfbf3bec..66a6ac50a4cd 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -400,6 +400,7 @@ struct cp210x_special_chars {
};
/* CP210X_VENDOR_SPECIFIC values */
+#define CP210X_GET_FW_VER 0x000E
#define CP210X_READ_2NCONFIG 0x000E
#define CP210X_GET_FW_VER_2N 0x0010
#define CP210X_READ_LATCH 0x00C2
@@ -638,7 +639,7 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
req, REQTYPE_INTERFACE_TO_HOST, 0,
port_priv->bInterfaceNumber, dmabuf, bufsize,
- USB_CTRL_SET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
if (result == bufsize) {
memcpy(buf, dmabuf, bufsize);
result = 0;
@@ -1145,33 +1146,6 @@ static void cp210x_disable_event_mode(struct usb_serial_port *port)
port_priv->event_mode = false;
}
-static int cp210x_set_chars(struct usb_serial_port *port,
- struct cp210x_special_chars *chars)
-{
- struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- void *dmabuf;
- int result;
-
- dmabuf = kmemdup(chars, sizeof(*chars), GFP_KERNEL);
- if (!dmabuf)
- return -ENOMEM;
-
- result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- CP210X_SET_CHARS, REQTYPE_HOST_TO_INTERFACE, 0,
- port_priv->bInterfaceNumber,
- dmabuf, sizeof(*chars), USB_CTRL_SET_TIMEOUT);
-
- kfree(dmabuf);
-
- if (result < 0) {
- dev_err(&port->dev, "failed to set special chars: %d\n", result);
- return result;
- }
-
- return 0;
-}
-
static bool cp210x_termios_change(const struct ktermios *a, const struct ktermios *b)
{
bool iflag_change, cc_change;
@@ -1192,6 +1166,7 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
struct cp210x_flow_ctl flow_ctl;
u32 flow_repl;
u32 ctl_hs;
+ bool crtscts;
int ret;
/*
@@ -1218,9 +1193,12 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
chars.bXonChar = START_CHAR(tty);
chars.bXoffChar = STOP_CHAR(tty);
- ret = cp210x_set_chars(port, &chars);
- if (ret)
- return;
+ ret = cp210x_write_reg_block(port, CP210X_SET_CHARS, &chars,
+ sizeof(chars));
+ if (ret) {
+ dev_err(&port->dev, "failed to set special chars: %d\n",
+ ret);
+ }
}
mutex_lock(&port_priv->mutex);
@@ -1249,14 +1227,14 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;
else
flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
- port_priv->crtscts = true;
+ crtscts = true;
} else {
ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE;
if (port_priv->rts)
flow_repl |= CP210X_SERIAL_RTS_ACTIVE;
else
flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
- port_priv->crtscts = false;
+ crtscts = false;
}
if (I_IXOFF(tty)) {
@@ -1279,8 +1257,12 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
- cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
+ ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
sizeof(flow_ctl));
+ if (ret)
+ goto out_unlock;
+
+ port_priv->crtscts = crtscts;
out_unlock:
mutex_unlock(&port_priv->mutex);
}
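The reshuffle above follows a commit-on-success pattern: stage the new CRTSCTS state in a local and update the cached copy only once the device accepted the CP210X_SET_FLOW write, so the cache never diverges from the hardware. A minimal sketch of the idea, with a hypothetical device write:

#include <stdbool.h>
#include <stdio.h>

struct port_state { bool crtscts; };

/* hypothetical device write that may fail */
static int write_flow_ctl(bool enable) { (void)enable; return 0; }

static void set_flow_control(struct port_state *s, bool enable)
{
	bool crtscts = enable;		/* stage in a local first */

	if (write_flow_ctl(crtscts))
		return;			/* cache still matches the device */

	s->crtscts = crtscts;		/* commit only after success */
}

int main(void)
{
	struct port_state s = { .crtscts = false };

	set_flow_control(&s, true);
	printf("%d\n", s.crtscts);	/* 1 */
	return 0;
}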
@@ -2111,12 +2093,26 @@ static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
return 0;
}
-static void cp210x_determine_quirks(struct usb_serial *serial)
+static void cp210x_determine_type(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
int ret;
+ ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PARTNUM, &priv->partnum,
+ sizeof(priv->partnum));
+ if (ret < 0) {
+ dev_warn(&serial->interface->dev,
+ "querying part number failed\n");
+ priv->partnum = CP210X_PARTNUM_UNKNOWN;
+ return;
+ }
+
switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2105:
+ case CP210X_PARTNUM_CP2108:
+ cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
+ break;
case CP210X_PARTNUM_CP2102N_QFN28:
case CP210X_PARTNUM_CP2102N_QFN24:
case CP210X_PARTNUM_CP2102N_QFN20:
@@ -2140,18 +2136,9 @@ static int cp210x_attach(struct usb_serial *serial)
if (!priv)
return -ENOMEM;
- result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
- CP210X_GET_PARTNUM, &priv->partnum,
- sizeof(priv->partnum));
- if (result < 0) {
- dev_warn(&serial->interface->dev,
- "querying part number failed\n");
- priv->partnum = CP210X_PARTNUM_UNKNOWN;
- }
-
usb_set_serial_data(serial, priv);
- cp210x_determine_quirks(serial);
+ cp210x_determine_type(serial);
cp210x_init_max_speed(serial);
result = cp210x_gpio_init(serial);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 6b18990258c3..6924fa95f6bd 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1199,9 +1199,9 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(stats, bool, S_IRUGO | S_IWUSR);
+module_param(stats, bool, 0644);
MODULE_PARM_DESC(stats, "Enable statistics or not");
-module_param(interval, int, S_IRUGO | S_IWUSR);
+module_param(interval, int, 0644);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
-module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
+module_param(unstable_bauds, bool, 0644);
MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
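These conversions (here and in the serial drivers below) are bit-for-bit equivalent: S_IRUGO expands to S_IRUSR | S_IRGRP | S_IROTH (0444), and OR-ing in S_IWUSR (0200) gives 0644. A quick check with the userspace counterparts of the same macros:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned int irugo = S_IRUSR | S_IRGRP | S_IROTH;

	printf("%o\n", irugo);			/* 444 */
	printf("%o\n", irugo | S_IWUSR);	/* 644 */
	return 0;
}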
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 33bbb3470ca3..99d19828dae6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2938,5 +2938,5 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(ndi_latency_timer, int, S_IRUGO | S_IWUSR);
+module_param(ndi_latency_timer, int, 0644);
MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override");
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 756d1ac7e96f..e5c75944ebb7 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1444,5 +1444,5 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(initial_mode, int, S_IRUGO);
+module_param(initial_mode, int, 0444);
MODULE_PARM_DESC(initial_mode, "Initial mode");
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index ea4edf5eed27..bdee78cc4a07 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -389,39 +389,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
release_firmware(fw);
}
-#if 0
-/************************************************************************
- *
- * Get string descriptor from device
- *
- ************************************************************************/
-static int get_string_desc(struct usb_device *dev, int Id,
- struct usb_string_descriptor **pRetDesc)
-{
- struct usb_string_descriptor StringDesc;
- struct usb_string_descriptor *pStringDesc;
-
- dev_dbg(&dev->dev, "%s - USB String ID = %d\n", __func__, Id);
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id, &StringDesc,
- sizeof(StringDesc)))
- return 0;
-
- pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
- if (!pStringDesc)
- return -1;
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id, pStringDesc,
- StringDesc.bLength)) {
- kfree(pStringDesc);
- return -1;
- }
-
- *pRetDesc = pStringDesc;
- return 0;
-}
-#endif
-
static void dump_product_info(struct edgeport_serial *edge_serial,
struct edgeport_product_info *product_info)
{
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 84b848d2794e..a7b3c15957ba 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2746,9 +2746,9 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/down3.bin");
-module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR);
+module_param(ignore_cpu_rev, bool, 0644);
MODULE_PARM_DESC(ignore_cpu_rev,
"Ignore the cpu revision when connecting to a device");
-module_param(default_uart_mode, int, S_IRUGO | S_IWUSR);
+module_param(default_uart_mode, int, 0644);
MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index f81746c3c26c..e11441bac44f 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -599,10 +599,10 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(connect_retries, int, S_IRUGO|S_IWUSR);
+module_param(connect_retries, int, 0644);
MODULE_PARM_DESC(connect_retries,
"Maximum number of connect retries (one second each)");
-module_param(initial_wait, int, S_IRUGO|S_IWUSR);
+module_param(initial_wait, int, 0644);
MODULE_PARM_DESC(initial_wait,
"Time to wait before attempting a connection (in seconds)");
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 19753611e7b0..0be3b5e1eaf3 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1188,20 +1188,20 @@ MODULE_AUTHOR("Alain Degreffe eczema@ecze.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(xmas, bool, S_IRUGO | S_IWUSR);
+module_param(xmas, bool, 0644);
MODULE_PARM_DESC(xmas, "Xmas colors enabled or not");
-module_param(boost, int, S_IRUGO | S_IWUSR);
+module_param(boost, int, 0644);
MODULE_PARM_DESC(boost, "Card overclock boost (in percent 100-500)");
-module_param(clockmode, int, S_IRUGO | S_IWUSR);
+module_param(clockmode, int, 0644);
MODULE_PARM_DESC(clockmode, "Card clock mode (1=3.579 MHz, 2=3.680 MHz, "
"3=6 Mhz)");
-module_param(cdmode, int, S_IRUGO | S_IWUSR);
+module_param(cdmode, int, 0644);
MODULE_PARM_DESC(cdmode, "Card detect mode (0=none, 1=CD, 2=!CD, 3=DSR, "
"4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING)");
-module_param(vcc_default, int, S_IRUGO | S_IWUSR);
+module_param(vcc_default, int, 0644);
MODULE_PARM_DESC(vcc_default, "Set default VCC (either 3 for 3.3V or 5 "
"for 5V). Default to 5.");
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 930b3d50a330..f45ca7ddf78e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -433,6 +433,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
switch (bcdDevice) {
case 0x100:
case 0x305:
+ case 0x405:
/*
* Assume it's an HXN-type if the device doesn't
* support the old read request value.
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 4b49b7c33364..9d56138133a9 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1056,5 +1056,5 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
-module_param(nmea, bool, S_IRUGO | S_IWUSR);
+module_param(nmea, bool, 0644);
MODULE_PARM_DESC(nmea, "NMEA streaming");
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index f4304ce69350..4c5a0a49035f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
/* Did this command access the last sector? */
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
- disk = srb->request->rq_disk;
+ disk = scsi_cmd_to_rq(srb)->rq_disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index a503c1b2bfd9..3d91982d8371 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -33,6 +33,16 @@ config VDPA_SIM_BLOCK
vDPA block device simulator which terminates IO request in a
memory buffer.
+config VDPA_USER
+ tristate "VDUSE (vDPA Device in Userspace) support"
+ depends on EVENTFD && MMU && HAS_DMA
+ select DMA_OPS
+ select VHOST_IOTLB
+ select IOMMU_IOVA
+ help
+ With VDUSE it is possible to emulate a vDPA device
+ in a userspace program.
+
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
@@ -53,6 +63,7 @@ config MLX5_VDPA
config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
+ select VHOST_RING
depends on MLX5_CORE
help
VDPA network driver for ConnectX6 and newer. Provides offloading
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 67fe7f3d6943..f02ebed33f19 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
+obj-$(CONFIG_VDPA_USER) += vdpa_user/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 6e197fe0fcf9..2808f1ba9f7b 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -158,7 +158,9 @@ next:
return -EIO;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+
+ for (i = 0; i < hw->nr_vring; i++) {
ifc_iowrite16(i, &hw->common_cfg->queue_select);
notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
hw->vring[i].notify_addr = hw->notify_base +
@@ -304,7 +306,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = ifc_ioread16(avail_idx_addr);
@@ -318,7 +320,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
ifc_iowrite16(num, avail_idx_addr);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 2996db0da490..09918af3ecf8 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -22,17 +22,8 @@
#define N3000_DEVICE_ID 0x1041
#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define IFCVF_NET_SUPPORTED_FEATURES \
- ((1ULL << VIRTIO_NET_F_MAC) | \
- (1ULL << VIRTIO_F_ANY_LAYOUT) | \
- (1ULL << VIRTIO_F_VERSION_1) | \
- (1ULL << VIRTIO_NET_F_STATUS) | \
- (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
- (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
- (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-
-/* Only one queue pair for now. */
-#define IFCVF_MAX_QUEUE_PAIRS 1
+/* Max 8 data queue pairs (16 queues) and one control vq for now. */
+#define IFCVF_MAX_QUEUES 17
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
#define IFCVF_QUEUE_MAX 32768
@@ -51,8 +42,6 @@
#define ifcvf_private_to_vf(adapter) \
(&((struct ifcvf_adapter *)adapter)->vf)
-#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
-
struct vring_info {
u64 desc;
u64 avail;
@@ -83,7 +72,7 @@ struct ifcvf_hw {
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *net_cfg;
- struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+ struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
@@ -103,7 +92,13 @@ struct ifcvf_vring_lm_cfg {
struct ifcvf_lm_cfg {
u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
- struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+ struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
+};
+
+struct ifcvf_vdpa_mgmt_dev {
+ struct vdpa_mgmt_dev mdev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
};
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 351c6cfb24c3..dcd648e1f7e7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -63,9 +63,13 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
int vector, i, ret, irq;
+ u16 max_intr;
- ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
- IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ /* all queues and config interrupt */
+ max_intr = vf->nr_vring + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, max_intr,
+ max_intr, PCI_IRQ_MSIX);
if (ret < 0) {
IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
return ret;
@@ -83,7 +87,7 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
return ret;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
pci_name(pdev), i);
vector = i + IFCVF_MSI_QUEUE_OFF;
@@ -112,7 +116,6 @@ static int ifcvf_start_datapath(void *private)
u8 status;
int ret;
- vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
ret = ifcvf_start_hw(vf);
if (ret < 0) {
status = ifcvf_get_status(vf);
@@ -128,7 +131,7 @@ static int ifcvf_stop_datapath(void *private)
struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].cb.callback = NULL;
ifcvf_stop_hw(vf);
@@ -141,7 +144,7 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
vf->vring[i].last_avail_idx = 0;
vf->vring[i].desc = 0;
vf->vring[i].avail = 0;
@@ -171,17 +174,12 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
struct pci_dev *pdev = adapter->pdev;
-
+ u32 type = vf->dev_type;
u64 features;
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
- break;
- case VIRTIO_ID_BLOCK:
+ if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
features = ifcvf_get_features(vf);
- break;
- default:
+ else {
features = 0;
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
}
@@ -218,23 +216,12 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
int ret;
vf = vdpa_to_vf(vdpa_dev);
- adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+ adapter = vdpa_to_adapter(vdpa_dev);
status_old = ifcvf_get_status(vf);
if (status_old == status)
return;
- if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
- !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
- ifcvf_stop_datapath(adapter);
- ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
- }
-
- if (status == 0) {
- ifcvf_reset_vring(adapter);
- return;
- }
-
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
ret = ifcvf_request_irq(adapter);
@@ -254,6 +241,29 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
ifcvf_set_status(vf, status);
}
+static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+ u8 status_old;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ adapter = vdpa_to_adapter(vdpa_dev);
+ status_old = ifcvf_get_status(vf);
+
+ if (status_old == 0)
+ return 0;
+
+ if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
+ ifcvf_stop_datapath(adapter);
+ ifcvf_free_irq(adapter, vf->nr_vring);
+ }
+
+ ifcvf_reset_vring(adapter);
+
+ return 0;
+}
+
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
return IFCVF_QUEUE_MAX;
@@ -437,6 +447,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_features = ifcvf_vdpa_set_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
+ .reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
@@ -458,63 +469,63 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_vq_notification = ifcvf_get_vq_notification,
};
-static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct virtio_device_id id_table_net[] = {
+ {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static struct virtio_device_id id_table_blk[] = {
+ {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static u32 get_dev_type(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ifcvf_adapter *adapter;
- struct ifcvf_hw *vf;
- int ret, i;
+ u32 dev_type;
- ret = pcim_enable_device(pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to enable device\n");
- return ret;
- }
+ /* This driver drives both modern virtio devices and transitional
+ * devices in modern mode.
+ * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
+ * devices and transitional devices in legacy mode will not work for
+ * vDPA; this driver does not drive devices with a legacy interface.
+ */
- ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
- IFCVF_DRIVER_NAME);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- return ret;
- }
+ if (pdev->device < 0x1040)
+ dev_type = pdev->subsystem_device;
+ else
+ dev_type = pdev->device - 0x1040;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret) {
- IFCVF_ERR(pdev, "No usable DMA configuration\n");
- return ret;
- }
+ return dev_type;
+}
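A quick sanity check of the convention above (per the virtio 1.x spec, modern devices use PCI device ID 0x1040 plus the virtio device ID, while transitional devices use a legacy ID below 0x1040 and carry the virtio ID in the PCI subsystem device ID):

#include <stdio.h>

static unsigned int dev_type(unsigned int device, unsigned int subsys)
{
	return device < 0x1040 ? subsys : device - 0x1040;
}

int main(void)
{
	printf("%u\n", dev_type(0x1041, 0x0000));	/* modern net -> 1 */
	printf("%u\n", dev_type(0x1000, 0x0001));	/* transitional net -> 1 */
	return 0;
}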
- ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
- if (ret) {
- IFCVF_ERR(pdev,
- "Failed for adding devres for freeing irq vectors\n");
- return ret;
- }
+static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
+ struct ifcvf_hw *vf;
+ struct device *dev;
+ int ret, i;
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ if (ifcvf_mgmt_dev->adapter)
+ return -EOPNOTSUPP;
+
+ pdev = ifcvf_mgmt_dev->pdev;
+ dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, NULL);
+ dev, &ifc_vdpa_ops, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
- pci_set_master(pdev);
- pci_set_drvdata(pdev, adapter);
+ ifcvf_mgmt_dev->adapter = adapter;
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
-
- /* This drirver drives both modern virtio devices and transitional
- * devices in modern mode.
- * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
- * so legacy devices and transitional devices in legacy
- * mode will not work for vDPA, this driver will not
- * drive devices with legacy interface.
- */
- if (pdev->device < 0x1040)
- vf->dev_type = pdev->subsystem_device;
- else
- vf->dev_type = pdev->device - 0x1040;
-
+ vf->dev_type = get_dev_type(pdev);
vf->base = pcim_iomap_table(pdev);
adapter->pdev = pdev;
@@ -526,14 +537,15 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
- ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+ ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
- IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
+ IFCVF_ERR(pdev, "Failed to register to vDPA bus");
goto err;
}
@@ -544,11 +556,100 @@ err:
return ret;
}
+static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ _vdpa_unregister_device(dev);
+ ifcvf_mgmt_dev->adapter = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
+ .dev_add = ifcvf_vdpa_dev_add,
+ .dev_del = ifcvf_vdpa_dev_del
+};
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct device *dev = &pdev->dev;
+ u32 dev_type;
+ int ret;
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->pdev = pdev;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ goto err;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ goto err;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed for adding devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+
+ ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to initialize the management interfaces\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(ifcvf_mgmt_dev);
+ return ret;
+}
+
static void ifcvf_remove(struct pci_dev *pdev)
{
- struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
- vdpa_unregister_device(&adapter->vdpa);
+ ifcvf_mgmt_dev = pci_get_drvdata(pdev);
+ vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
+ kfree(ifcvf_mgmt_dev);
}
static struct pci_device_id ifcvf_pci_ids[] = {
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 0002b2136b48..01a848adf590 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -5,7 +5,7 @@
#define __MLX5_VDPA_H__
#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
+#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
@@ -48,6 +48,26 @@ struct mlx5_vdpa_resources {
bool valid;
};
+struct mlx5_control_vq {
+ struct vhost_iotlb *iotlb;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
+ struct vringh vring;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ struct vdpa_callback event_cb;
+ struct vringh_kiov riov;
+ struct vringh_kiov wiov;
+ unsigned short head;
+};
+
+struct mlx5_ctrl_wq_ent {
+ struct work_struct work;
+ struct mlx5_vdpa_dev *mvdev;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -57,9 +77,12 @@ struct mlx5_vdpa_dev {
u64 actual_features;
u8 status;
u32 max_vqs;
+ u16 max_idx;
u32 generation;
struct mlx5_vdpa_mr mr;
+ struct mlx5_control_vq cvq;
+ struct workqueue_struct *wq;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
@@ -68,6 +91,7 @@ int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index e59135fa867e..ff010c6d0cd3 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
@@ -451,33 +452,30 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct vhost_iotlb_map *map;
+ u64 start = 0, last = ULLONG_MAX;
int err;
- if (mr->initialized)
- return 0;
-
- if (iotlb)
- err = create_user_mr(mvdev, iotlb);
- else
- err = create_dma_mr(mvdev, mr);
-
- if (!err)
- mr->initialized = true;
+ if (!src) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+ return err;
+ }
- return err;
+ for (map = vhost_iotlb_itree_first(src, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+ map->addr, map->perm);
+ if (err)
+ return err;
+ }
+ return 0;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
- int err;
-
- mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb);
- mutex_unlock(&mvdev->mr.mkey_mtx);
- return err;
+ vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -501,6 +499,7 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
if (!mr->initialized)
goto out;
+ prune_iotlb(mvdev);
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
@@ -512,6 +511,48 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+ if (mr->initialized)
+ return 0;
+
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
+
+ if (err)
+ return err;
+
+ err = dup_iotlb(mvdev, iotlb);
+ if (err)
+ goto out_err;
+
+ mr->initialized = true;
+ return 0;
+
+out_err:
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ mutex_lock(&mvdev->mr.mkey_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mvdev->mr.mkey_mtx);
+ return err;
+}
+
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index d4606213f88a..15e266d0e27a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
@@ -128,6 +129,16 @@ int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *
return err;
}
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+
+ MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+ return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+}
+
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
@@ -221,6 +232,22 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *m
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
+static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
+ if (!mvdev->cvq.iotlb)
+ return -ENOMEM;
+
+ vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
+
+ return 0;
+}
+
+static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ vhost_iotlb_free(mvdev->cvq.iotlb);
+}
+
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
@@ -260,10 +287,17 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err = -ENOMEM;
goto err_key;
}
+
+ err = init_ctrl_vq(mvdev);
+ if (err)
+ goto err_ctrl;
+
res->valid = true;
return 0;
+err_ctrl:
+ iounmap(res->kick_addr);
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
@@ -282,6 +316,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
if (!res->valid)
return;
+ cleanup_ctrl_vq(mvdev);
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5906cada2293..294ba05e6fc9 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+
struct mlx5_vdpa_net_resources {
u32 tisn;
u32 tdn;
@@ -90,7 +92,6 @@ struct mlx5_vq_restore_info {
u16 avail_index;
u16 used_index;
bool ready;
- struct vdpa_callback cb;
bool restore;
};
@@ -100,7 +101,6 @@ struct mlx5_vdpa_virtqueue {
u64 device_addr;
u64 driver_addr;
u32 num_ent;
- struct vdpa_callback event_cb;
/* Resources for implementing the notification channel from the device
* to the driver. fwqp is the firmware end of an RC connection; the
@@ -135,11 +135,20 @@ struct mlx5_vdpa_virtqueue {
*/
#define MLX5_MAX_SUPPORTED_VQS 16
+static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ if (unlikely(idx > mvdev->max_idx))
+ return false;
+
+ return true;
+}
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+ struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
@@ -151,15 +160,18 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule;
bool setup;
u16 mtu;
+ u32 cur_num_vqs;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
-static int setup_driver(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_dev *mvdev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
+#define MLX5_CVQ_MAX_ENT 16
+
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -172,11 +184,41 @@ static bool mlx5_vdpa_debug;
mlx5_vdpa_info(mvdev, "%s\n", #_status); \
} while (0)
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+ return virtio_legacy_is_little_endian() ||
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
+}
+
+static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
+static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+{
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
return max_vqs / 2;
}
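+/* The control VQ index follows all data virtqueues: it is fixed at 2 when
+ * MQ is not negotiated (a single RX/TX pair), otherwise it sits right
+ * after the last possible data virtqueue.
+ */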
+static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
+{
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ return 2;
+
+ return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+}
+
+static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ return idx == ctrl_vq_idx(mvdev);
+}
+
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
if (status & ~VALID_STATUS_MASK)
@@ -481,6 +523,10 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
+ struct mlx5_vdpa_net *ndev = mvq->ndev;
+ struct vdpa_callback *event_cb;
+
+ event_cb = &ndev->event_cbs[mvq->index];
mlx5_cq_set_ci(&mvq->cq.mcq);
/* make sure CQ consumer update is visible to the hardware before updating
@@ -488,8 +534,8 @@ static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int nu
*/
dma_wmb();
rx_post(&mvq->vqqp, num);
- if (mvq->event_cb.callback)
- mvq->event_cb.callback(mvq->event_cb.private);
+ if (event_cb->callback)
+ event_cb->callback(event_cb->private);
}
static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
@@ -1100,10 +1146,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (!mvq->num_ent)
return 0;
- if (mvq->initialized) {
- mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
- return -EINVAL;
- }
+ if (mvq->initialized)
+ return 0;
err = cq_create(ndev, idx, mvq->num_ent);
if (err)
@@ -1190,19 +1234,20 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
- int log_max_rqt;
__be32 *list;
+ int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (log_max_rqt < 1)
+ max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
return -EOPNOTSUPP;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1211,10 +1256,9 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+ for (i = 0, j = 0; j < max_rqt; j++) {
if (!ndev->vqs[j].initialized)
continue;
@@ -1223,6 +1267,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
i++;
}
}
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
@@ -1232,6 +1277,52 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
return 0;
}
+#define MLX5_MODIFY_RQT_NUM_RQS ((u64)1)
+
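+/* Rebuild the RQT entry list from the first @num data virtqueues and
+ * update the table in place with MODIFY_RQT, setting rqt_actual_size to
+ * the number of receive queues actually listed.
+ */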
+static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+{
+ __be32 *list;
+ int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+ max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid);
+ MLX5_SET64(modify_rqt_in, in, bitmask, MLX5_MODIFY_RQT_NUM_RQS);
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+ for (i = 0, j = 0; j < num; j++) {
+ if (!ndev->vqs[j].initialized)
+ continue;
+
+ if (!vq_is_tx(ndev->vqs[j].index)) {
+ list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+ i++;
+ }
+ }
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
+ kfree(in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
@@ -1345,12 +1436,206 @@ static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
ndev->rx_rule = NULL;
}
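+
+/* VIRTIO_NET_CTRL_MAC_ADDR_SET: swap the MAC steering entry in the
+ * physical function's MPFS table before committing the new address to
+ * the device config; the command fails if either MPFS update fails.
+ */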
+static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_core_dev *pfmdev;
+ size_t read;
+ u8 mac[ETH_ALEN];
+
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);
+ if (read != ETH_ALEN)
+ break;
+
+ if (!memcmp(ndev->config.mac, mac, ETH_ALEN)) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
+ ndev->config.mac);
+ break;
+ }
+ }
+
+ if (mlx5_mpfs_add_mac(pfmdev, mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n",
+ mac);
+ break;
+ }
+
+ memcpy(ndev->config.mac, mac, ETH_ALEN);
+ status = VIRTIO_NET_OK;
+ break;
+
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int cur_qps = ndev->cur_num_vqs / 2;
+ int err;
+ int i;
+
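+ /* Shrinking: shrink the RQT to the remaining queues first, then tear
+ * down the excess virtqueues. Growing: create the new virtqueues first
+ * and only then expose them through the RQT.
+ */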
+ if (cur_qps > newqps) {
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ return err;
+
+ for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ ndev->cur_num_vqs = 2 * newqps;
+ } else {
+ ndev->cur_num_vqs = 2 * newqps;
+ for (i = cur_qps * 2; i < 2 * newqps; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto clean_added;
+ }
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ goto clean_added;
+ }
+ return 0;
+
+clean_added:
+ for (--i; i >= cur_qps; --i)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ return err;
+}
+
+static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ struct virtio_net_ctrl_mq mq;
+ size_t read;
+ u16 newqps;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
+ if (read != sizeof(mq))
+ break;
+
+ newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (ndev->cur_num_vqs == 2 * newqps) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (newqps & (newqps - 1))
+ break;
+
+ if (!change_num_qps(mvdev, newqps))
+ status = VIRTIO_NET_OK;
+
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static void mlx5_cvq_kick_handler(struct work_struct *work)
+{
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct virtio_net_ctrl_hdr ctrl;
+ struct mlx5_ctrl_wq_ent *wqent;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_control_vq *cvq;
+ struct mlx5_vdpa_net *ndev;
+ size_t read, write;
+ int err;
+
+ wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
+ mvdev = wqent->mvdev;
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ cvq = &mvdev->cvq;
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ goto out;
+
+ if (!cvq->ready)
+ goto out;
+
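+ /* Drain the control VQ: fetch each descriptor, read the command
+ * header, dispatch on its class, then push the ack status back and
+ * complete the descriptor.
+ */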
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
+ GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl));
+ if (read != sizeof(ctrl))
+ break;
+
+ switch (ctrl.class) {
+ case VIRTIO_NET_CTRL_MAC:
+ status = handle_ctrl_mac(mvdev, ctrl.cmd);
+ break;
+ case VIRTIO_NET_CTRL_MQ:
+ status = handle_ctrl_mq(mvdev, ctrl.cmd);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status));
+ vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+ vringh_kiov_cleanup(&cvq->riov);
+ vringh_kiov_cleanup(&cvq->wiov);
+
+ if (vringh_need_notify_iotlb(&cvq->vring))
+ vringh_notify(&cvq->vring);
+ }
+out:
+ kfree(wqent);
+}
+
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_ctrl_wq_ent *wqent;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
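+ /* Control commands may issue firmware commands that sleep, so CVQ
+ * kicks are deferred to a workqueue instead of being handled here.
+ */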
+ if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
+ if (!mvdev->cvq.ready)
+ return;
+
+ wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+ if (!wqent)
+ return;
+ wqent->mvdev = mvdev;
+ INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
+ queue_work(mvdev->wq, &wqent->work);
+ return;
+ }
+
+ mvq = &ndev->vqs[idx];
if (unlikely(!mvq->ready))
return;
@@ -1362,8 +1647,19 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.desc_addr = desc_area;
+ mvdev->cvq.device_addr = device_area;
+ mvdev->cvq.driver_addr = driver_area;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
mvq->desc_addr = desc_area;
mvq->device_addr = device_area;
mvq->driver_addr = driver_area;
@@ -1376,6 +1672,9 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return;
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -1384,17 +1683,46 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
- vq->event_cb = *cb;
+ ndev->event_cbs[idx] = *cb;
+}
+
+static void mlx5_cvq_notify(struct vringh *vring)
+{
+ struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);
+
+ if (!cvq->event_cb.callback)
+ return;
+
+ cvq->event_cb.callback(cvq->event_cb.private);
+}
+
+static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready)
+{
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->ready = ready;
+ if (!ready)
+ return;
+
+ cvq->vring.notify = mlx5_cvq_notify;
}
static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ set_cvq_ready(mvdev, ready);
+ return;
+ }
+ mvq = &ndev->vqs[idx];
if (!ready)
suspend_vq(ndev, mvq);
@@ -1405,9 +1733,14 @@ static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
- return mvq->ready;
+ if (!is_index_valid(mvdev, idx))
+ return false;
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return mvdev->cvq.ready;
+
+ return ndev->vqs[idx].ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
@@ -1415,8 +1748,17 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.vring.last_avail_idx = state->split.avail_index;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
return -EINVAL;
@@ -1431,10 +1773,19 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_virtq_attr attr;
int err;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ state->split.avail_index = mvdev->cvq.vring.last_avail_idx;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
/* If the virtq object was destroyed, use the value saved at
* the last minute of suspend_vq. This caters for userspace
* that cares about emulating the index after vq is stopped.
@@ -1491,10 +1842,14 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
u16 dev_features;
dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
+ ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
}
@@ -1507,17 +1862,29 @@ static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
return 0;
}
-static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
int err;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
}
+ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ MLX5_CVQ_MAX_ENT, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+ if (err)
+ goto err_vq;
+ }
+
return 0;
err_vq:
@@ -1541,16 +1908,22 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-/* TODO: cross-endian support */
-static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
-{
- return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
-}
-
-static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
{
- return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
+ /* MQ supported. CVQ index is right above the last data virtqueue's */
+ mvdev->max_idx = mvdev->max_vqs;
+ } else {
+ /* Only CVQ supported. Data virtqueues occupy indices 0 and 1.
+ * CVQ gets index 2.
+ */
+ mvdev->max_idx = 2;
+ }
+ } else {
+ /* Two data virtqueues only: one for rx and one for tx */
+ mvdev->max_idx = 1;
+ }
}
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
@@ -1568,6 +1941,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ update_cvq_info(mvdev);
return err;
}
@@ -1605,15 +1979,14 @@ static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vq_restore_info *ri = &mvq->ri;
- struct mlx5_virtq_attr attr;
+ struct mlx5_virtq_attr attr = {};
int err;
- if (!mvq->initialized)
- return 0;
-
- err = query_virtqueue(ndev, mvq, &attr);
- if (err)
- return err;
+ if (mvq->initialized) {
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err)
+ return err;
+ }
ri->avail_index = attr.available_index;
ri->used_index = attr.used_index;
@@ -1622,7 +1995,6 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
ri->desc_addr = mvq->desc_addr;
ri->device_addr = mvq->device_addr;
ri->driver_addr = mvq->driver_addr;
- ri->cb = mvq->event_cb;
ri->restore = true;
return 0;
}
@@ -1667,12 +2039,12 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
mvq->desc_addr = ri->desc_addr;
mvq->device_addr = ri->device_addr;
mvq->driver_addr = ri->driver_addr;
- mvq->event_cb = ri->cb;
}
}
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
suspend_vqs(ndev);
@@ -1681,58 +2053,59 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
goto err_mr;
teardown_driver(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+ mlx5_vdpa_destroy_mr(mvdev);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb);
if (err)
goto err_mr;
- if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
return 0;
restore_channels_info(ndev);
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err)
goto err_setup;
return 0;
err_setup:
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ mlx5_vdpa_destroy_mr(mvdev);
err_mr:
return err;
}
-static int setup_driver(struct mlx5_vdpa_net *ndev)
+static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
mutex_lock(&ndev->reslock);
if (ndev->setup) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
+ mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
err = 0;
goto out;
}
- err = setup_virtqueues(ndev);
+ err = setup_virtqueues(mvdev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+ mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
goto out;
}
err = create_rqt(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+ mlx5_vdpa_warn(mvdev, "create_rqt\n");
goto err_rqt;
}
err = create_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+ mlx5_vdpa_warn(mvdev, "create_tir\n");
goto err_tir;
}
err = add_fwd_to_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+ mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
goto err_fwd;
}
ndev->setup = true;
@@ -1781,24 +2154,10 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
int err;
print_status(mvdev, status, true);
- if (!status) {
- mlx5_vdpa_info(mvdev, "performing device reset\n");
- teardown_driver(ndev);
- clear_vqs_ready(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- ndev->mvdev.status = 0;
- ndev->mvdev.mlx_features = 0;
- ++mvdev->generation;
- if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- if (mlx5_vdpa_create_mr(mvdev, NULL))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
- }
- return;
- }
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
goto err_setup;
@@ -1817,6 +2176,29 @@ err_setup:
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ print_status(mvdev, 0, true);
+ mlx5_vdpa_info(mvdev, "performing device reset\n");
+ teardown_driver(ndev);
+ clear_vqs_ready(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
+ ndev->mvdev.mlx_features = 0;
+ memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->mvdev.actual_features = 0;
+ ++mvdev->generation;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_mr(mvdev, NULL))
+ mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ }
+
+ return 0;
+}
+
static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
{
return sizeof(struct virtio_net_config);
@@ -1848,7 +2230,6 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
@@ -1859,7 +2240,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
}
if (change_map)
- return mlx5_vdpa_change_map(ndev, iotlb);
+ return mlx5_vdpa_change_map(mvdev, iotlb);
return 0;
}
@@ -1889,6 +2270,9 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
struct mlx5_vdpa_net *ndev;
phys_addr_t addr;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return ret;
+
/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
* notification to avoid the risk of mapping pages that contain the BAR of
* more than one SF
@@ -1928,6 +2312,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
+ .reset = mlx5_vdpa_reset,
.get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
@@ -2040,7 +2425,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name);
+ name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2063,8 +2448,11 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
goto err_mtu;
+
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
+ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@@ -2080,8 +2468,15 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mr;
+ mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq");
+ if (!mvdev->wq) {
+ err = -ENOMEM;
+ goto err_res2;
+ }
+
+ ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
+ err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
if (err)
goto err_reg;
@@ -2089,6 +2484,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return 0;
err_reg:
+ destroy_workqueue(mvdev->wq);
+err_res2:
free_resources(ndev);
err_mr:
mlx5_vdpa_destroy_mr(mvdev);
@@ -2106,7 +2503,9 @@ err_mtu:
static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
+ destroy_workqueue(mvdev->wq);
_vdpa_unregister_device(dev);
mgtdev->ndev = NULL;
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 3fc4525fc05c..1dc121a07a93 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -69,6 +69,7 @@ static void vdpa_release_dev(struct device *d)
* @config: the bus operations that are supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
+ * @use_va: indicate whether virtual addresses must be used by this device
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
@@ -78,7 +79,8 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- size_t size, const char *name)
+ size_t size, const char *name,
+ bool use_va)
{
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -89,6 +91,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (!!config->dma_map != !!config->dma_unmap)
goto err;
+ /* It should only work for devices that use an on-chip IOMMU */
+ if (use_va && !(config->dma_map || config->set_map))
+ goto err;
+
err = -ENOMEM;
vdev = kzalloc(size, GFP_KERNEL);
if (!vdev)
@@ -104,6 +110,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->index = err;
vdev->config = config;
vdev->features_valid = false;
+ vdev->use_va = use_va;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c621cf7feec0..5f484fff8dbe 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -92,7 +92,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
vq->vring.notify = NULL;
}
-static void vdpasim_reset(struct vdpasim *vdpasim)
+static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
int i;
@@ -137,7 +137,8 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
int ret;
/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
- iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+ iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
+ ULONG_MAX - 1, true);
if (!iova)
return DMA_MAPPING_ERROR;
@@ -250,7 +251,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
- dev_attr->name);
+ dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
goto err_alloc;
@@ -459,11 +460,21 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
spin_lock(&vdpasim->lock);
vdpasim->status = status;
- if (status == 0)
- vdpasim_reset(vdpasim);
spin_unlock(&vdpasim->lock);
}
+static int vdpasim_reset(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->status = 0;
+ vdpasim_do_reset(vdpasim);
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -544,14 +555,14 @@ err:
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
- u64 pa, u32 perm)
+ u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
- perm);
+ ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
+ pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
@@ -607,6 +618,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -635,6 +647,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
diff --git a/drivers/vdpa/vdpa_user/Makefile b/drivers/vdpa/vdpa_user/Makefile
new file mode 100644
index 000000000000..260e0b26af99
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+vduse-y := vduse_dev.o iova_domain.o
+
+obj-$(CONFIG_VDPA_USER) += vduse.o
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
new file mode 100644
index 000000000000..1daae2608860
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/vdpa.h>
+
+#include "iova_domain.h"
+
+static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ struct file *file, u64 offset)
+{
+ struct vdpa_map_file *map_file;
+ int ret;
+
+ map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
+ if (!map_file)
+ return -ENOMEM;
+
+ map_file->file = get_file(file);
+ map_file->offset = offset;
+
+ ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
+ addr, perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ return ret;
+ }
+ return 0;
+}
+
+static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(domain->iotlb, map);
+ }
+}
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+ int ret;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, start, last);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ ret = vduse_iotlb_add_range(domain, map->start, map->last,
+ map->addr, map->perm,
+ map_file->file,
+ map_file->offset);
+ if (ret)
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ return 0;
+err:
+ vduse_iotlb_del_range(domain, start, last);
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+
+ spin_lock(&domain->iotlb_lock);
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ vduse_iotlb_del_range(domain, map->start, map->last);
+ }
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size, u64 paddr)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page) {
+ map->bounce_page = alloc_page(GFP_ATOMIC);
+ if (!map->bounce_page)
+ return -ENOMEM;
+ }
+ map->orig_phys = paddr;
+ paddr += PAGE_SIZE;
+ iova += PAGE_SIZE;
+ }
+ return 0;
+}
+
+static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ iova += PAGE_SIZE;
+ }
+}
+
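+/* Copy between the original physical pages and the bounce buffer at @addr,
+ * one page at a time: original to bounce for DMA_TO_DEVICE, bounce back to
+ * original otherwise. kmap_atomic() keeps this safe for highmem pages.
+ */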
+static void do_bounce(phys_addr_t orig, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(orig);
+ unsigned int offset = offset_in_page(orig);
+ char *buffer;
+ unsigned int sz = 0;
+
+ while (size) {
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ buffer = kmap_atomic(pfn_to_page(pfn));
+ if (dir == DMA_TO_DEVICE)
+ memcpy(addr, buffer + offset, sz);
+ else
+ memcpy(buffer + offset, addr, sz);
+ kunmap_atomic(buffer);
+
+ size -= sz;
+ pfn++;
+ addr += sz;
+ offset = 0;
+ }
+}
+
+static void vduse_domain_bounce(struct vduse_iova_domain *domain,
+ dma_addr_t iova, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_bounce_map *map;
+ unsigned int offset;
+ void *addr;
+ size_t sz;
+
+ if (iova >= domain->bounce_size)
+ return;
+
+ while (size) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ offset = offset_in_page(iova);
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ if (WARN_ON(!map->bounce_page ||
+ map->orig_phys == INVALID_PHYS_ADDR))
+ return;
+
+ addr = page_address(map->bounce_page) + offset;
+ do_bounce(map->orig_phys + offset, addr, sz, dir);
+ size -= sz;
+ iova += sz;
+ }
+}
+
+static struct page *
+vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ u64 start = iova & PAGE_MASK;
+ u64 last = start + PAGE_SIZE - 1;
+ struct vhost_iotlb_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, start, last);
+ if (!map)
+ goto out;
+
+ page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static struct page *
+vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ struct vduse_bounce_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page)
+ goto out;
+
+ page = map->bounce_page;
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static void
+vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
+ continue;
+
+ if (!map->bounce_page)
+ continue;
+
+ __free_page(map->bounce_page);
+ map->bounce_page = NULL;
+ }
+}
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
+{
+ if (!domain->bounce_map)
+ return;
+
+ spin_lock(&domain->iotlb_lock);
+ if (!domain->bounce_map)
+ goto unlock;
+
+ vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
+ domain->bounce_map = 0;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
+{
+ int ret = 0;
+
+ if (domain->bounce_map)
+ return 0;
+
+ spin_lock(&domain->iotlb_lock);
+ if (domain->bounce_map)
+ goto unlock;
+
+ ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
+ 0, VHOST_MAP_RW, domain->file, 0);
+ if (ret)
+ goto unlock;
+
+ domain->bounce_map = 1;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+static dma_addr_t
+vduse_domain_alloc_iova(struct iova_domain *iovad,
+ unsigned long size, unsigned long limit)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+ unsigned long iova_pfn;
+
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
+ iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
+
+ return iova_pfn << shift;
+}
+
+static void vduse_domain_free_iova(struct iova_domain *iovad,
+ dma_addr_t iova, size_t size)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+
+ free_iova_fast(iovad, iova >> shift, iova_len);
+}
+
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+ unsigned long limit = domain->bounce_size - 1;
+ phys_addr_t pa = page_to_phys(page) + offset;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ if (vduse_domain_init_bounce_map(domain))
+ goto err;
+
+ if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
+ goto err;
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+
+ return iova;
+err:
+ vduse_domain_free_iova(iovad, iova, size);
+ return DMA_MAPPING_ERROR;
+}
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+
+ vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ vduse_domain_free_iova(iovad, dma_addr, size);
+}
+
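+/* Coherent allocations come from the consistent IOVA domain; the backing
+ * pages are published to userspace through the domain's IOTLB, with the
+ * IOVA reused as the offset into the domain file.
+ */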
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ unsigned long limit = domain->iova_limit;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+ void *orig = alloc_pages_exact(size, flag);
+
+ if (!iova || !orig)
+ goto err;
+
+ spin_lock(&domain->iotlb_lock);
+ if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
+ virt_to_phys(orig), VHOST_MAP_RW,
+ domain->file, (u64)iova)) {
+ spin_unlock(&domain->iotlb_lock);
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ *dma_addr = iova;
+
+ return orig;
+err:
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (orig)
+ free_pages_exact(orig, size);
+ if (iova)
+ vduse_domain_free_iova(iovad, iova, size);
+
+ return NULL;
+}
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ phys_addr_t pa;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ if (WARN_ON(!map)) {
+ spin_unlock(&domain->iotlb_lock);
+ return;
+ }
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ pa = map->addr;
+ vhost_iotlb_map_free(domain->iotlb, map);
+ spin_unlock(&domain->iotlb_lock);
+
+ vduse_domain_free_iova(iovad, dma_addr, size);
+ free_pages_exact(phys_to_virt(pa), size);
+}
+
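+/* Fault in pages of the userspace mapping on demand: offsets below
+ * bounce_size resolve to bounce pages, higher offsets to the pages
+ * backing coherent allocations.
+ */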
+static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
+{
+ struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
+ unsigned long iova = vmf->pgoff << PAGE_SHIFT;
+ struct page *page;
+
+ if (!domain)
+ return VM_FAULT_SIGBUS;
+
+ if (iova < domain->bounce_size)
+ page = vduse_domain_get_bounce_page(domain, iova);
+ else
+ page = vduse_domain_get_coherent_page(domain, iova);
+
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct vduse_domain_mmap_ops = {
+ .fault = vduse_domain_mmap_fault,
+};
+
+static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ vma->vm_private_data = domain;
+ vma->vm_ops = &vduse_domain_mmap_ops;
+
+ return 0;
+}
+
+static int vduse_domain_release(struct inode *inode, struct file *file)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
+ vduse_domain_free_bounce_pages(domain);
+ spin_unlock(&domain->iotlb_lock);
+ put_iova_domain(&domain->stream_iovad);
+ put_iova_domain(&domain->consistent_iovad);
+ vhost_iotlb_free(domain->iotlb);
+ vfree(domain->bounce_maps);
+ kfree(domain);
+
+ return 0;
+}
+
+static const struct file_operations vduse_domain_fops = {
+ .owner = THIS_MODULE,
+ .mmap = vduse_domain_mmap,
+ .release = vduse_domain_release,
+};
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain)
+{
+ fput(domain->file);
+}
+
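+/* An IOVA domain is split in two: a stream domain covering the first
+ * bounce_size bytes for bounced streaming DMA, and a consistent domain
+ * above it for coherent allocations.
+ */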
+struct vduse_iova_domain *
+vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
+{
+ struct vduse_iova_domain *domain;
+ struct file *file;
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
+ if (iova_limit <= bounce_size)
+ return NULL;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!domain->iotlb)
+ goto err_iotlb;
+
+ domain->iova_limit = iova_limit;
+ domain->bounce_size = PAGE_ALIGN(bounce_size);
+ domain->bounce_maps = vzalloc(bounce_pfns *
+ sizeof(struct vduse_bounce_map));
+ if (!domain->bounce_maps)
+ goto err_map;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ }
+ file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
+ domain, O_RDWR);
+ if (IS_ERR(file))
+ goto err_file;
+
+ domain->file = file;
+ spin_lock_init(&domain->iotlb_lock);
+ init_iova_domain(&domain->stream_iovad,
+ PAGE_SIZE, IOVA_START_PFN);
+ init_iova_domain(&domain->consistent_iovad,
+ PAGE_SIZE, bounce_pfns);
+
+ return domain;
+err_file:
+ vfree(domain->bounce_maps);
+err_map:
+ vhost_iotlb_free(domain->iotlb);
+err_iotlb:
+ kfree(domain);
+ return NULL;
+}
+
+int vduse_domain_init(void)
+{
+ return iova_cache_get();
+}
+
+void vduse_domain_exit(void)
+{
+ iova_cache_put();
+}
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
new file mode 100644
index 000000000000..2722d9b8e21a
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#ifndef _VDUSE_IOVA_DOMAIN_H
+#define _VDUSE_IOVA_DOMAIN_H
+
+#include <linux/iova.h>
+#include <linux/dma-mapping.h>
+#include <linux/vhost_iotlb.h>
+
+#define IOVA_START_PFN 1
+
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
+struct vduse_bounce_map {
+ struct page *bounce_page;
+ u64 orig_phys;
+};
+
+struct vduse_iova_domain {
+ struct iova_domain stream_iovad;
+ struct iova_domain consistent_iovad;
+ struct vduse_bounce_map *bounce_maps;
+ size_t bounce_size;
+ unsigned long iova_limit;
+ int bounce_map;
+ struct vhost_iotlb *iotlb;
+ spinlock_t iotlb_lock;
+ struct file *file;
+};
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs);
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs);
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain);
+
+struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
+ size_t bounce_size);
+
+int vduse_domain_init(void);
+
+void vduse_domain_exit(void);
+
+#endif /* _VDUSE_IOVA_DOMAIN_H */
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
new file mode 100644
index 000000000000..29a38ecba19e
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDUSE: vDPA Device in Userspace
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/dma-map-ops.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <uapi/linux/vduse.h>
+#include <uapi/linux/vdpa.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_blk.h>
+#include <linux/mod_devicetable.h>
+
+#include "iova_domain.h"
+
+#define DRV_AUTHOR "Yongji Xie <xieyongji@bytedance.com>"
+#define DRV_DESC "vDPA Device in Userspace"
+#define DRV_LICENSE "GPL v2"
+
+#define VDUSE_DEV_MAX (1U << MINORBITS)
+#define VDUSE_BOUNCE_SIZE (64 * 1024 * 1024)
+#define VDUSE_IOVA_SIZE (128 * 1024 * 1024)
+#define VDUSE_MSG_DEFAULT_TIMEOUT 30
+
+struct vduse_virtqueue {
+ u16 index;
+ u16 num_max;
+ u32 num;
+ u64 desc_addr;
+ u64 driver_addr;
+ u64 device_addr;
+ struct vdpa_vq_state state;
+ bool ready;
+ bool kicked;
+ spinlock_t kick_lock;
+ spinlock_t irq_lock;
+ struct eventfd_ctx *kickfd;
+ struct vdpa_callback cb;
+ struct work_struct inject;
+ struct work_struct kick;
+};
+
+struct vduse_dev;
+
+struct vduse_vdpa {
+ struct vdpa_device vdpa;
+ struct vduse_dev *dev;
+};
+
+struct vduse_dev {
+ struct vduse_vdpa *vdev;
+ struct device *dev;
+ struct vduse_virtqueue *vqs;
+ struct vduse_iova_domain *domain;
+ char *name;
+ struct mutex lock;
+ spinlock_t msg_lock;
+ u64 msg_unique;
+ u32 msg_timeout;
+ wait_queue_head_t waitq;
+ struct list_head send_list;
+ struct list_head recv_list;
+ struct vdpa_callback config_cb;
+ struct work_struct inject;
+ spinlock_t irq_lock;
+ int minor;
+ bool broken;
+ bool connected;
+ u64 api_version;
+ u64 device_features;
+ u64 driver_features;
+ u32 device_id;
+ u32 vendor_id;
+ u32 generation;
+ u32 config_size;
+ void *config;
+ u8 status;
+ u32 vq_num;
+ u32 vq_align;
+};
+
+struct vduse_dev_msg {
+ struct vduse_dev_request req;
+ struct vduse_dev_response resp;
+ struct list_head list;
+ wait_queue_head_t waitq;
+ bool completed;
+};
+
+struct vduse_control {
+ u64 api_version;
+};
+
+static DEFINE_MUTEX(vduse_lock);
+static DEFINE_IDR(vduse_idr);
+
+static dev_t vduse_major;
+static struct class *vduse_class;
+static struct cdev vduse_ctrl_cdev;
+static struct cdev vduse_cdev;
+static struct workqueue_struct *vduse_irq_wq;
+
+static u32 allowed_device_id[] = {
+ VIRTIO_ID_BLOCK,
+};
+
+static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
+{
+ struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);
+
+ return vdev->dev;
+}
+
+static inline struct vduse_dev *dev_to_vduse(struct device *dev)
+{
+ struct vdpa_device *vdpa = dev_to_vdpa(dev);
+
+ return vdpa_to_vduse(vdpa);
+}
+
+static struct vduse_dev_msg *vduse_find_msg(struct list_head *head,
+ uint32_t request_id)
+{
+ struct vduse_dev_msg *msg;
+
+ list_for_each_entry(msg, head, list) {
+ if (msg->req.request_id == request_id) {
+ list_del(&msg->list);
+ return msg;
+ }
+ }
+
+ return NULL;
+}
+
+static struct vduse_dev_msg *vduse_dequeue_msg(struct list_head *head)
+{
+ struct vduse_dev_msg *msg = NULL;
+
+ if (!list_empty(head)) {
+ msg = list_first_entry(head, struct vduse_dev_msg, list);
+ list_del(&msg->list);
+ }
+
+ return msg;
+}
+
+static void vduse_enqueue_msg(struct list_head *head,
+ struct vduse_dev_msg *msg)
+{
+ list_add_tail(&msg->list, head);
+}
+
+static void vduse_dev_broken(struct vduse_dev *dev)
+{
+ struct vduse_dev_msg *msg, *tmp;
+
+ if (unlikely(dev->broken))
+ return;
+
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ list_for_each_entry_safe(msg, tmp, &dev->send_list, list) {
+ list_del(&msg->list);
+ msg->completed = 1;
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ wake_up(&msg->waitq);
+ }
+ dev->broken = true;
+ wake_up(&dev->waitq);
+}
+
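+/* Post a request on the send list for userspace to read and wait for the
+ * matching response; a timeout (when configured) marks the whole device
+ * broken so pending and future requests fail fast.
+ */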
+static int vduse_dev_msg_sync(struct vduse_dev *dev,
+ struct vduse_dev_msg *msg)
+{
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EIO;
+
+ init_waitqueue_head(&msg->waitq);
+ spin_lock(&dev->msg_lock);
+ if (unlikely(dev->broken)) {
+ spin_unlock(&dev->msg_lock);
+ return -EIO;
+ }
+ msg->req.request_id = dev->msg_unique++;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ wake_up(&dev->waitq);
+ spin_unlock(&dev->msg_lock);
+ if (dev->msg_timeout)
+ ret = wait_event_killable_timeout(msg->waitq, msg->completed,
+ (long)dev->msg_timeout * HZ);
+ else
+ ret = wait_event_killable(msg->waitq, msg->completed);
+
+ spin_lock(&dev->msg_lock);
+ if (!msg->completed) {
+ list_del(&msg->list);
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ /* Mark the device as malfunctioning when there is a timeout */
+ if (!ret)
+ vduse_dev_broken(dev);
+ }
+ ret = (msg->resp.result == VDUSE_REQ_RESULT_OK) ? 0 : -EIO;
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static int vduse_dev_get_vq_state_packed(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_packed *packed)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ packed->last_avail_counter =
+ msg.resp.vq_state.packed.last_avail_counter & 0x0001;
+ packed->last_avail_idx =
+ msg.resp.vq_state.packed.last_avail_idx & 0x7FFF;
+ packed->last_used_counter =
+ msg.resp.vq_state.packed.last_used_counter & 0x0001;
+ packed->last_used_idx =
+ msg.resp.vq_state.packed.last_used_idx & 0x7FFF;
+
+ return 0;
+}
+
+static int vduse_dev_get_vq_state_split(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_split *split)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ split->avail_index = msg.resp.vq_state.split.avail_index;
+
+ return 0;
+}
+
+static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ msg.req.type = VDUSE_SET_STATUS;
+ msg.req.s.status = status;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static int vduse_dev_update_iotlb(struct vduse_dev *dev,
+ u64 start, u64 last)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ if (last < start)
+ return -EINVAL;
+
+ msg.req.type = VDUSE_UPDATE_IOTLB;
+ msg.req.iova.start = start;
+ msg.req.iova.last = last;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static ssize_t vduse_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_msg *msg;
+ int size = sizeof(struct vduse_dev_request);
+ ssize_t ret;
+
+ if (iov_iter_count(to) < size)
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ while (1) {
+ msg = vduse_dequeue_msg(&dev->send_list);
+ if (msg)
+ break;
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ goto unlock;
+
+ spin_unlock(&dev->msg_lock);
+ ret = wait_event_interruptible_exclusive(dev->waitq,
+ !list_empty(&dev->send_list));
+ if (ret)
+ return ret;
+
+ spin_lock(&dev->msg_lock);
+ }
+ spin_unlock(&dev->msg_lock);
+ ret = copy_to_iter(&msg->req, size, to);
+ spin_lock(&dev->msg_lock);
+ if (ret != size) {
+ ret = -EFAULT;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ goto unlock;
+ }
+ vduse_enqueue_msg(&dev->recv_list, msg);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static bool is_mem_zero(const char *ptr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i])
+ return false;
+ }
+ return true;
+}
+
+static ssize_t vduse_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_response resp;
+ struct vduse_dev_msg *msg;
+ size_t ret;
+
+ ret = copy_from_iter(&resp, sizeof(resp), from);
+ if (ret != sizeof(resp))
+ return -EINVAL;
+
+ if (!is_mem_zero((const char *)resp.reserved, sizeof(resp.reserved)))
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ msg = vduse_find_msg(&dev->recv_list, resp.request_id);
+ if (!msg) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ memcpy(&msg->resp, &resp, sizeof(resp));
+ msg->completed = 1;
+ wake_up(&msg->waitq);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
+{
+ struct vduse_dev *dev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &dev->waitq, wait);
+
+ spin_lock(&dev->msg_lock);
+
+ if (unlikely(dev->broken))
+ mask |= EPOLLERR;
+ if (!list_empty(&dev->send_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!list_empty(&dev->recv_list))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ spin_unlock(&dev->msg_lock);
+
+ return mask;
+}
+
+static void vduse_dev_reset(struct vduse_dev *dev)
+{
+ int i;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ /* The coherent mappings are handled in vduse_dev_free_coherent() */
+ if (domain->bounce_map)
+ vduse_domain_reset_bounce_map(domain);
+
+ dev->status = 0;
+ dev->driver_features = 0;
+ dev->generation++;
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = NULL;
+ dev->config_cb.private = NULL;
+ spin_unlock(&dev->irq_lock);
+ flush_work(&dev->inject);
+
+ for (i = 0; i < dev->vq_num; i++) {
+ struct vduse_virtqueue *vq = &dev->vqs[i];
+
+ vq->ready = false;
+ vq->desc_addr = 0;
+ vq->driver_addr = 0;
+ vq->device_addr = 0;
+ vq->num = 0;
+ memset(&vq->state, 0, sizeof(vq->state));
+
+ spin_lock(&vq->kick_lock);
+ vq->kicked = false;
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = NULL;
+ spin_unlock(&vq->kick_lock);
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = NULL;
+ vq->cb.private = NULL;
+ spin_unlock(&vq->irq_lock);
+ flush_work(&vq->inject);
+ flush_work(&vq->kick);
+ }
+}
+
+static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->desc_addr = desc_area;
+ vq->driver_addr = driver_area;
+ vq->device_addr = device_area;
+
+ return 0;
+}
+
+static void vduse_vq_kick(struct vduse_virtqueue *vq)
+{
+ spin_lock(&vq->kick_lock);
+ if (!vq->ready)
+ goto unlock;
+
+ if (vq->kickfd)
+ eventfd_signal(vq->kickfd, 1);
+ else
+ vq->kicked = true;
+unlock:
+ spin_unlock(&vq->kick_lock);
+}
+
+static void vduse_vq_kick_work(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, kick);
+
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_kick_vq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
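+ /* Defer the kick to a workqueue when signalling the eventfd directly is not safe */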
+ if (!eventfd_signal_allowed()) {
+ schedule_work(&vq->kick);
+ return;
+ }
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = cb->callback;
+ vq->cb.private = cb->private;
+ spin_unlock(&vq->irq_lock);
+}
+
+static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->num = num;
+}
+
+static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 idx, bool ready)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->ready = ready;
+}
+
+static bool vduse_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ return vq->ready;
+}
+
+static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+ const struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq->state.packed.last_avail_counter =
+ state->packed.last_avail_counter;
+ vq->state.packed.last_avail_idx = state->packed.last_avail_idx;
+ vq->state.packed.last_used_counter =
+ state->packed.last_used_counter;
+ vq->state.packed.last_used_idx = state->packed.last_used_idx;
+ } else {
+ vq->state.split.avail_index = state->split.avail_index;
+ }
+
+ return 0;
+}
+
+static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
+ return vduse_dev_get_vq_state_packed(dev, vq, &state->packed);
+
+ return vduse_dev_get_vq_state_split(dev, vq, &state->split);
+}
+
+static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vq_align;
+}
+
+static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_features;
+}
+
+static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->driver_features = features;
+ return 0;
+}
+
+static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = cb->callback;
+ dev->config_cb.private = cb->private;
+ spin_unlock(&dev->irq_lock);
+}
+
+static u16 vduse_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ u16 num_max = 0;
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (num_max < dev->vqs[i].num_max)
+ num_max = dev->vqs[i].num_max;
+
+ return num_max;
+}
+
+static u32 vduse_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_id;
+}
+
+static u32 vduse_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vendor_id;
+}
+
+static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->status;
+}
+
+static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, status))
+ return;
+
+ dev->status = status;
+}
+
+static size_t vduse_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->config_size;
+}
+
+static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (offset > dev->config_size ||
+ len > dev->config_size - offset)
+ return;
+
+ memcpy(buf, dev->config + offset, len);
+}
+
+static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ /* Currently we only support a read-only configuration space */
+}
+
+static int vduse_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, 0))
+ return -EIO;
+
+ vduse_dev_reset(dev);
+
+ return 0;
+}
+
+static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->generation;
+}
+
+static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+ struct vhost_iotlb *iotlb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ int ret;
+
+ ret = vduse_domain_set_map(dev->domain, iotlb);
+ if (ret)
+ return ret;
+
+ ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
+ if (ret) {
+ vduse_domain_clear_map(dev->domain, iotlb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vduse_vdpa_free(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->vdev = NULL;
+}
+
+static const struct vdpa_config_ops vduse_vdpa_config_ops = {
+ .set_vq_address = vduse_vdpa_set_vq_address,
+ .kick_vq = vduse_vdpa_kick_vq,
+ .set_vq_cb = vduse_vdpa_set_vq_cb,
+ .set_vq_num = vduse_vdpa_set_vq_num,
+ .set_vq_ready = vduse_vdpa_set_vq_ready,
+ .get_vq_ready = vduse_vdpa_get_vq_ready,
+ .set_vq_state = vduse_vdpa_set_vq_state,
+ .get_vq_state = vduse_vdpa_get_vq_state,
+ .get_vq_align = vduse_vdpa_get_vq_align,
+ .get_features = vduse_vdpa_get_features,
+ .set_features = vduse_vdpa_set_features,
+ .set_config_cb = vduse_vdpa_set_config_cb,
+ .get_vq_num_max = vduse_vdpa_get_vq_num_max,
+ .get_device_id = vduse_vdpa_get_device_id,
+ .get_vendor_id = vduse_vdpa_get_vendor_id,
+ .get_status = vduse_vdpa_get_status,
+ .set_status = vduse_vdpa_set_status,
+ .get_config_size = vduse_vdpa_get_config_size,
+ .get_config = vduse_vdpa_get_config,
+ .set_config = vduse_vdpa_set_config,
+ .get_generation = vduse_vdpa_get_generation,
+ .reset = vduse_vdpa_reset,
+ .set_map = vduse_vdpa_set_map,
+ .free = vduse_vdpa_free,
+};
+
+static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
+}
+
+static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
+}
+
+static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+ unsigned long iova;
+ void *addr;
+
+ *dma_addr = DMA_MAPPING_ERROR;
+ addr = vduse_domain_alloc_coherent(domain, size,
+ (dma_addr_t *)&iova, flag, attrs);
+ if (!addr)
+ return NULL;
+
+ *dma_addr = (dma_addr_t)iova;
+
+ return addr;
+}
+
+static void vduse_dev_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
+}
+
+static size_t vduse_dev_max_mapping_size(struct device *dev)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return domain->bounce_size;
+}
+
+static const struct dma_map_ops vduse_dev_dma_ops = {
+ .map_page = vduse_dev_map_page,
+ .unmap_page = vduse_dev_unmap_page,
+ .alloc = vduse_dev_alloc_coherent,
+ .free = vduse_dev_free_coherent,
+ .max_mapping_size = vduse_dev_max_mapping_size,
+};
+
+static unsigned int perm_to_file_flags(u8 perm)
+{
+ unsigned int flags = 0;
+
+ switch (perm) {
+ case VDUSE_ACCESS_WO:
+ flags |= O_WRONLY;
+ break;
+ case VDUSE_ACCESS_RO:
+ flags |= O_RDONLY;
+ break;
+ case VDUSE_ACCESS_RW:
+ flags |= O_RDWR;
+ break;
+ default:
+ WARN(1, "invalidate vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags;
+}
+
+static int vduse_kickfd_setup(struct vduse_dev *dev,
+ struct vduse_vq_eventfd *eventfd)
+{
+ struct eventfd_ctx *ctx = NULL;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ if (eventfd->index >= dev->vq_num)
+ return -EINVAL;
+
+ index = array_index_nospec(eventfd->index, dev->vq_num);
+ vq = &dev->vqs[index];
+ if (eventfd->fd >= 0) {
+ ctx = eventfd_ctx_fdget(eventfd->fd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ } else if (eventfd->fd != VDUSE_EVENTFD_DEASSIGN)
+ return 0;
+
+ spin_lock(&vq->kick_lock);
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = ctx;
+ if (vq->ready && vq->kicked && vq->kickfd) {
+ eventfd_signal(vq->kickfd, 1);
+ vq->kicked = false;
+ }
+ spin_unlock(&vq->kick_lock);
+
+ return 0;
+}
+
+static bool vduse_dev_is_ready(struct vduse_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (!dev->vqs[i].num_max)
+ return false;
+
+ return true;
+}
+
+static void vduse_dev_irq_inject(struct work_struct *work)
+{
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+ spin_lock_irq(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+ spin_unlock_irq(&dev->irq_lock);
+}
+
+static void vduse_vq_irq_inject(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+ spin_lock_irq(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+ spin_unlock_irq(&vq->irq_lock);
+}
+
+static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vduse_dev *dev = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EPERM;
+
+ switch (cmd) {
+ case VDUSE_IOTLB_GET_FD: {
+ struct vduse_iotlb_entry entry;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ struct vduse_iova_domain *domain = dev->domain;
+ struct file *f = NULL;
+
+ ret = -EFAULT;
+ if (copy_from_user(&entry, argp, sizeof(entry)))
+ break;
+
+ ret = -EINVAL;
+ if (entry.start > entry.last)
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ entry.start, entry.last);
+ if (map) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ f = get_file(map_file->file);
+ entry.offset = map_file->offset;
+ entry.start = map->start;
+ entry.last = map->last;
+ entry.perm = map->perm;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ ret = -EINVAL;
+ if (!f)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &entry, sizeof(entry))) {
+ fput(f);
+ break;
+ }
+ ret = receive_fd(f, perm_to_file_flags(entry.perm));
+ fput(f);
+ break;
+ }
+ case VDUSE_DEV_GET_FEATURES:
+ /*
+ * Just mirror what the driver wrote here.
+ * The driver is expected to check FEATURES_OK later.
+ */
+ ret = put_user(dev->driver_features, (u64 __user *)argp);
+ break;
+ case VDUSE_DEV_SET_CONFIG: {
+ struct vduse_config_data config;
+ unsigned long size = offsetof(struct vduse_config_data,
+ buffer);
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (config.offset > dev->config_size ||
+ config.length == 0 ||
+ config.length > dev->config_size - config.offset)
+ break;
+
+ ret = -EFAULT;
+ if (copy_from_user(dev->config + config.offset, argp + size,
+ config.length))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_DEV_INJECT_CONFIG_IRQ:
+ ret = 0;
+ queue_work(vduse_irq_wq, &dev->inject);
+ break;
+ case VDUSE_VQ_SETUP: {
+ struct vduse_vq_config config;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, sizeof(config)))
+ break;
+
+ ret = -EINVAL;
+ if (config.index >= dev->vq_num)
+ break;
+
+ if (!is_mem_zero((const char *)config.reserved,
+ sizeof(config.reserved)))
+ break;
+
+ index = array_index_nospec(config.index, dev->vq_num);
+ dev->vqs[index].num_max = config.max_size;
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_GET_INFO: {
+ struct vduse_vq_info vq_info;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&vq_info, argp, sizeof(vq_info)))
+ break;
+
+ ret = -EINVAL;
+ if (vq_info.index >= dev->vq_num)
+ break;
+
+ index = array_index_nospec(vq_info.index, dev->vq_num);
+ vq = &dev->vqs[index];
+ vq_info.desc_addr = vq->desc_addr;
+ vq_info.driver_addr = vq->driver_addr;
+ vq_info.device_addr = vq->device_addr;
+ vq_info.num = vq->num;
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq_info.packed.last_avail_counter =
+ vq->state.packed.last_avail_counter;
+ vq_info.packed.last_avail_idx =
+ vq->state.packed.last_avail_idx;
+ vq_info.packed.last_used_counter =
+ vq->state.packed.last_used_counter;
+ vq_info.packed.last_used_idx =
+ vq->state.packed.last_used_idx;
+ } else {
+ vq_info.split.avail_index =
+ vq->state.split.avail_index;
+ }
+
+ vq_info.ready = vq->ready;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_SETUP_KICKFD: {
+ struct vduse_vq_eventfd eventfd;
+
+ ret = -EFAULT;
+ if (copy_from_user(&eventfd, argp, sizeof(eventfd)))
+ break;
+
+ ret = vduse_kickfd_setup(dev, &eventfd);
+ break;
+ }
+ case VDUSE_VQ_INJECT_IRQ: {
+ u32 index;
+
+ ret = -EFAULT;
+ if (get_user(index, (u32 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (index >= dev->vq_num)
+ break;
+
+ ret = 0;
+ index = array_index_nospec(index, dev->vq_num);
+ queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+static int vduse_dev_release(struct inode *inode, struct file *file)
+{
+ struct vduse_dev *dev = file->private_data;
+
+ spin_lock(&dev->msg_lock);
+ /* Make sure the inflight messages can be processed after reconnection */
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ spin_unlock(&dev->msg_lock);
+ dev->connected = false;
+
+ return 0;
+}
+
+static struct vduse_dev *vduse_dev_get_from_minor(int minor)
+{
+ struct vduse_dev *dev;
+
+ mutex_lock(&vduse_lock);
+ dev = idr_find(&vduse_idr, minor);
+ mutex_unlock(&vduse_lock);
+
+ return dev;
+}
+
+static int vduse_dev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct vduse_dev *dev = vduse_dev_get_from_minor(iminor(inode));
+
+ if (!dev)
+ return -ENODEV;
+
+ ret = -EBUSY;
+ mutex_lock(&dev->lock);
+ if (dev->connected)
+ goto unlock;
+
+ ret = 0;
+ dev->connected = true;
+ file->private_data = dev;
+unlock:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static const struct file_operations vduse_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_dev_open,
+ .release = vduse_dev_release,
+ .read_iter = vduse_dev_read_iter,
+ .write_iter = vduse_dev_write_iter,
+ .poll = vduse_dev_poll,
+ .unlocked_ioctl = vduse_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct vduse_dev *vduse_dev_create(void)
+{
+ struct vduse_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return NULL;
+
+ mutex_init(&dev->lock);
+ spin_lock_init(&dev->msg_lock);
+ INIT_LIST_HEAD(&dev->send_list);
+ INIT_LIST_HEAD(&dev->recv_list);
+ spin_lock_init(&dev->irq_lock);
+
+ INIT_WORK(&dev->inject, vduse_dev_irq_inject);
+ init_waitqueue_head(&dev->waitq);
+
+ return dev;
+}
+
+static void vduse_dev_destroy(struct vduse_dev *dev)
+{
+ kfree(dev);
+}
+
+static struct vduse_dev *vduse_find_dev(const char *name)
+{
+ struct vduse_dev *dev;
+ int id;
+
+ idr_for_each_entry(&vduse_idr, dev, id)
+ if (!strcmp(dev->name, name))
+ return dev;
+
+ return NULL;
+}
+
+static int vduse_destroy_dev(char *name)
+{
+ struct vduse_dev *dev = vduse_find_dev(name);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&dev->lock);
+ if (dev->vdev || dev->connected) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
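+ /* Block further opens while the device is being torn down */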
+ dev->connected = true;
+ mutex_unlock(&dev->lock);
+
+ vduse_dev_reset(dev);
+ device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+ idr_remove(&vduse_idr, dev->minor);
+ kvfree(dev->config);
+ kfree(dev->vqs);
+ vduse_domain_destroy(dev->domain);
+ kfree(dev->name);
+ vduse_dev_destroy(dev);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static bool device_is_allowed(u32 device_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allowed_device_id); i++)
+ if (allowed_device_id[i] == device_id)
+ return true;
+
+ return false;
+}
+
+static bool features_is_valid(u64 features)
+{
+ if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
+ return false;
+
+ /* Currently we only support a read-only configuration space */
+ if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
+ return false;
+
+ return true;
+}
+
+static bool vduse_validate_config(struct vduse_dev_config *config)
+{
+ if (!is_mem_zero((const char *)config->reserved,
+ sizeof(config->reserved)))
+ return false;
+
+ if (config->vq_align > PAGE_SIZE)
+ return false;
+
+ if (config->config_size > PAGE_SIZE)
+ return false;
+
+ if (!device_is_allowed(config->device_id))
+ return false;
+
+ if (!features_is_valid(config->features))
+ return false;
+
+ return true;
+}
+
+static ssize_t msg_timeout_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%u\n", dev->msg_timeout);
+}
+
+static ssize_t msg_timeout_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = kstrtouint(buf, 10, &dev->msg_timeout);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(msg_timeout);
+
+static struct attribute *vduse_dev_attrs[] = {
+ &dev_attr_msg_timeout.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(vduse_dev);
+
+static int vduse_create_dev(struct vduse_dev_config *config,
+ void *config_buf, u64 api_version)
+{
+ int i, ret;
+ struct vduse_dev *dev;
+
+ ret = -EEXIST;
+ if (vduse_find_dev(config->name))
+ goto err;
+
+ ret = -ENOMEM;
+ dev = vduse_dev_create();
+ if (!dev)
+ goto err;
+
+ dev->api_version = api_version;
+ dev->device_features = config->features;
+ dev->device_id = config->device_id;
+ dev->vendor_id = config->vendor_id;
+ dev->name = kstrdup(config->name, GFP_KERNEL);
+ if (!dev->name)
+ goto err_str;
+
+ dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
+ VDUSE_BOUNCE_SIZE);
+ if (!dev->domain)
+ goto err_domain;
+
+ dev->config = config_buf;
+ dev->config_size = config->config_size;
+ dev->vq_align = config->vq_align;
+ dev->vq_num = config->vq_num;
+ dev->vqs = kcalloc(dev->vq_num, sizeof(*dev->vqs), GFP_KERNEL);
+ if (!dev->vqs)
+ goto err_vqs;
+
+ for (i = 0; i < dev->vq_num; i++) {
+ dev->vqs[i].index = i;
+ INIT_WORK(&dev->vqs[i].inject, vduse_vq_irq_inject);
+ INIT_WORK(&dev->vqs[i].kick, vduse_vq_kick_work);
+ spin_lock_init(&dev->vqs[i].kick_lock);
+ spin_lock_init(&dev->vqs[i].irq_lock);
+ }
+
+ ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto err_idr;
+
+ dev->minor = ret;
+ dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
+ dev->dev = device_create(vduse_class, NULL,
+ MKDEV(MAJOR(vduse_major), dev->minor),
+ dev, "%s", config->name);
+ if (IS_ERR(dev->dev)) {
+ ret = PTR_ERR(dev->dev);
+ goto err_dev;
+ }
+ __module_get(THIS_MODULE);
+
+ return 0;
+err_dev:
+ idr_remove(&vduse_idr, dev->minor);
+err_idr:
+ kfree(dev->vqs);
+err_vqs:
+ vduse_domain_destroy(dev->domain);
+err_domain:
+ kfree(dev->name);
+err_str:
+ vduse_dev_destroy(dev);
+err:
+ kvfree(config_buf);
+ return ret;
+}
+
+static long vduse_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ void __user *argp = (void __user *)arg;
+ struct vduse_control *control = file->private_data;
+
+ mutex_lock(&vduse_lock);
+ switch (cmd) {
+ case VDUSE_GET_API_VERSION:
+ ret = put_user(control->api_version, (u64 __user *)argp);
+ break;
+ case VDUSE_SET_API_VERSION: {
+ u64 api_version;
+
+ ret = -EFAULT;
+ if (get_user(api_version, (u64 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (api_version > VDUSE_API_VERSION)
+ break;
+
+ ret = 0;
+ control->api_version = api_version;
+ break;
+ }
+ case VDUSE_CREATE_DEV: {
+ struct vduse_dev_config config;
+ unsigned long size = offsetof(struct vduse_dev_config, config);
+ void *buf;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (!vduse_validate_config(&config))
+ break;
+
+ buf = vmemdup_user(argp + size, config.config_size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ break;
+ }
+ config.name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_create_dev(&config, buf, control->api_version);
+ break;
+ }
+ case VDUSE_DESTROY_DEV: {
+ char name[VDUSE_NAME_MAX];
+
+ ret = -EFAULT;
+ if (copy_from_user(name, argp, VDUSE_NAME_MAX))
+ break;
+
+ name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_destroy_dev(name);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&vduse_lock);
+
+ return ret;
+}
+
+static int vduse_release(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control = file->private_data;
+
+ kfree(control);
+ return 0;
+}
+
+static int vduse_open(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control;
+
+ control = kmalloc(sizeof(struct vduse_control), GFP_KERNEL);
+ if (!control)
+ return -ENOMEM;
+
+ control->api_version = VDUSE_API_VERSION;
+ file->private_data = control;
+
+ return 0;
+}
+
+static const struct file_operations vduse_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_open,
+ .release = vduse_release,
+ .unlocked_ioctl = vduse_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static char *vduse_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
+}
+
+static void vduse_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vduse_mgmtdev = {
+ .init_name = "vduse",
+ .release = vduse_mgmtdev_release,
+};
+
+static struct vdpa_mgmt_dev mgmt_dev;
+
+static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
+{
+ struct vduse_vdpa *vdev;
+ int ret;
+
+ if (dev->vdev)
+ return -EEXIST;
+
+ vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
+ &vduse_vdpa_config_ops, name, true);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ dev->vdev = vdev;
+ vdev->dev = dev;
+ vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
+ if (ret) {
+ put_device(&vdev->vdpa.dev);
+ return ret;
+ }
+ set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
+ vdev->vdpa.dma_dev = &vdev->vdpa.dev;
+ vdev->vdpa.mdev = &mgmt_dev;
+
+ return 0;
+}
+
+static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct vduse_dev *dev;
+ int ret;
+
+ mutex_lock(&vduse_lock);
+ dev = vduse_find_dev(name);
+ if (!dev || !vduse_dev_is_ready(dev)) {
+ mutex_unlock(&vduse_lock);
+ return -EINVAL;
+ }
+ ret = vduse_dev_init_vdpa(dev, name);
+ mutex_unlock(&vduse_lock);
+ if (ret)
+ return ret;
+
+ ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
+ if (ret) {
+ put_device(&dev->vdev->vdpa.dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ _vdpa_unregister_device(dev);
+}
+
+static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
+ .dev_add = vdpa_dev_add,
+ .dev_del = vdpa_dev_del,
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vduse_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpa_dev_mgmtdev_ops,
+};
+
+static int vduse_mgmtdev_init(void)
+{
+ int ret;
+
+ ret = device_register(&vduse_mgmtdev);
+ if (ret)
+ return ret;
+
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ device_unregister(&vduse_mgmtdev);
+ return ret;
+}
+
+static void vduse_mgmtdev_exit(void)
+{
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vduse_mgmtdev);
+}
+
+static int vduse_init(void)
+{
+ int ret;
+ struct device *dev;
+
+ vduse_class = class_create(THIS_MODULE, "vduse");
+ if (IS_ERR(vduse_class))
+ return PTR_ERR(vduse_class);
+
+ vduse_class->devnode = vduse_devnode;
+ vduse_class->dev_groups = vduse_dev_groups;
+
+ ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
+ if (ret)
+ goto err_chardev_region;
+
+ /* /dev/vduse/control */
+ cdev_init(&vduse_ctrl_cdev, &vduse_ctrl_fops);
+ vduse_ctrl_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_ctrl_cdev, vduse_major, 1);
+ if (ret)
+ goto err_ctrl_cdev;
+
+ dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_device;
+ }
+
+ /* /dev/vduse/$DEVICE */
+ cdev_init(&vduse_cdev, &vduse_dev_fops);
+ vduse_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_cdev, MKDEV(MAJOR(vduse_major), 1),
+ VDUSE_DEV_MAX - 1);
+ if (ret)
+ goto err_cdev;
+
+ vduse_irq_wq = alloc_workqueue("vduse-irq",
+ WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
+ if (!vduse_irq_wq) {
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
+ ret = vduse_domain_init();
+ if (ret)
+ goto err_domain;
+
+ ret = vduse_mgmtdev_init();
+ if (ret)
+ goto err_mgmtdev;
+
+ return 0;
+err_mgmtdev:
+ vduse_domain_exit();
+err_domain:
+ destroy_workqueue(vduse_irq_wq);
+err_wq:
+ cdev_del(&vduse_cdev);
+err_cdev:
+ device_destroy(vduse_class, vduse_major);
+err_device:
+ cdev_del(&vduse_ctrl_cdev);
+err_ctrl_cdev:
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+err_chardev_region:
+ class_destroy(vduse_class);
+ return ret;
+}
+module_init(vduse_init);
+
+static void vduse_exit(void)
+{
+ vduse_mgmtdev_exit();
+ vduse_domain_exit();
+ destroy_workqueue(vduse_irq_wq);
+ cdev_del(&vduse_cdev);
+ device_destroy(vduse_class, vduse_major);
+ cdev_del(&vduse_ctrl_cdev);
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+ class_destroy(vduse_class);
+}
+module_exit(vduse_exit);
+
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
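
For orientation, a hedged sketch of the userspace side of the char-device
API added above, based only on what this patch shows. It assumes the uapi
header added by this series (<linux/vduse.h>); the device name
"vduse-blk0", the single queue and its size, and the empty config payload
are illustrative, and error handling is mostly elided.

/* vduse_sketch.c - illustrative only, not part of this patch */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

int main(void)
{
        uint64_t api = 0;
        int ctrl = open("/dev/vduse/control", O_RDWR);

        if (ctrl < 0)
                return 1;

        /* Negotiate the message API version before creating devices. */
        ioctl(ctrl, VDUSE_GET_API_VERSION, &api);
        ioctl(ctrl, VDUSE_SET_API_VERSION, &api);

        /*
         * Only VIRTIO_ID_BLOCK passes device_is_allowed(), and
         * VIRTIO_F_ACCESS_PLATFORM is mandatory per features_is_valid().
         */
        struct vduse_dev_config *cfg = calloc(1, sizeof(*cfg));
        strncpy(cfg->name, "vduse-blk0", sizeof(cfg->name) - 1);
        cfg->device_id = VIRTIO_ID_BLOCK;
        cfg->features = 1ULL << VIRTIO_F_ACCESS_PLATFORM;
        cfg->vq_num = 1;
        cfg->vq_align = 4096;
        cfg->config_size = 0; /* a real device supplies virtio_blk_config */
        if (ioctl(ctrl, VDUSE_CREATE_DEV, cfg))
                return 1;

        /* Per-device node named by vduse_devnode(): /dev/vduse/<name> */
        int dev = open("/dev/vduse/vduse-blk0", O_RDWR);
        struct vduse_vq_config vq_cfg = { .index = 0, .max_size = 256 };

        /* Queues need a max size or vduse_dev_is_ready() stays false. */
        ioctl(dev, VDUSE_VQ_SETUP, &vq_cfg);

        for (;;) {
                struct vduse_dev_request req;
                struct vduse_dev_response resp;

                if (read(dev, &req, sizeof(req)) != sizeof(req))
                        break;
                /* resp.reserved must stay zeroed or the write is rejected */
                memset(&resp, 0, sizeof(resp));
                resp.request_id = req.request_id;
                resp.result = VDUSE_REQ_RESULT_OK;
                /*
                 * A real daemon would fill resp.vq_state for
                 * VDUSE_GET_VQ_STATE and act on the other request types.
                 */
                write(dev, &resp, sizeof(resp));
        }
        return 0;
}

Once the queues are configured, the device is instantiated on the host
through the vdpa management interface, which lands in vdpa_dev_add() above.
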
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index fe0527329857..5bcd00246d2e 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -189,10 +189,20 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
}
vp_modern_set_status(mdev, status);
+}
- if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
- (s & VIRTIO_CONFIG_S_DRIVER_OK))
+static int vp_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ vp_modern_set_status(mdev, 0);
+
+ if (s & VIRTIO_CONFIG_S_DRIVER_OK)
vp_vdpa_free_irq(vp_vdpa);
+
+ return 0;
}
static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
@@ -398,6 +408,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.set_features = vp_vdpa_set_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
+ .reset = vp_vdpa_reset,
.get_vq_num_max = vp_vdpa_get_vq_num_max,
.get_vq_state = vp_vdpa_get_vq_state,
.get_vq_notification = vp_vdpa_get_vq_notification,
@@ -435,7 +446,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL);
+ dev, &vp_vdpa_ops, NULL, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
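
The hunks above are part of the series-wide split of device reset out of
.set_status into a dedicated .reset callback that may fail; the
vdpa_alloc_device() hunk likewise grows a trailing bool. A hedged sketch of
the resulting shape for a hypothetical parent driver (all "foo" names are
illustrative, not from this patch):

#include <linux/vdpa.h>
#include <linux/virtio_config.h>

struct foo_vdpa {
        struct vdpa_device vdpa;
        u8 status;
};

static struct foo_vdpa *vdpa_to_foo(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct foo_vdpa, vdpa);
}

static void foo_free_irq(struct foo_vdpa *foo)
{
        /* hypothetical teardown, mirrors vp_vdpa_free_irq() above */
}

static void foo_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
        /* Status 0 no longer arrives here; the core calls .reset instead. */
        vdpa_to_foo(vdpa)->status = status;
}

static int foo_vdpa_reset(struct vdpa_device *vdpa)
{
        struct foo_vdpa *foo = vdpa_to_foo(vdpa);
        u8 s = foo->status;

        foo->status = 0;
        /* Mirror vp_vdpa above: drop per-DRIVER_OK resources on reset. */
        if (s & VIRTIO_CONFIG_S_DRIVER_OK)
                foo_free_irq(foo);

        return 0; /* reset may now report failure, e.g. -EIO */
}

static const struct vdpa_config_ops foo_vdpa_ops = {
        .set_status = foo_vdpa_set_status,
        .reset = foo_vdpa_reset,
        /* remaining ops elided in this sketch */
};
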
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 67d0bf4efa16..6130d00252ed 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -1,12 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
+menuconfig VFIO
+ tristate "VFIO Non-Privileged userspace driver framework"
+ select IOMMU_API
+ select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ help
+ VFIO provides a framework for secure userspace device drivers.
+ See Documentation/driver-api/vfio.rst for more details.
+
+ If you don't know what to do here, say N.
+
+if VFIO
config VFIO_IOMMU_TYPE1
tristate
- depends on VFIO
default n
config VFIO_IOMMU_SPAPR_TCE
tristate
- depends on VFIO && SPAPR_TCE_IOMMU
+ depends on SPAPR_TCE_IOMMU
default VFIO
config VFIO_SPAPR_EEH
@@ -16,22 +26,11 @@ config VFIO_SPAPR_EEH
config VFIO_VIRQFD
tristate
- depends on VFIO && EVENTFD
+ select EVENTFD
default n
-menuconfig VFIO
- tristate "VFIO Non-Privileged userspace driver framework"
- select IOMMU_API
- select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
- help
- VFIO provides a framework for secure userspace device drivers.
- See Documentation/driver-api/vfio.rst for more details.
-
- If you don't know what to do here, say N.
-
-menuconfig VFIO_NOIOMMU
+config VFIO_NOIOMMU
bool "VFIO No-IOMMU support"
- depends on VFIO
help
VFIO is built on the ability to isolate devices using the IOMMU.
Only with an IOMMU can userspace access to DMA capable devices be
@@ -48,4 +47,6 @@ source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
source "drivers/vfio/mdev/Kconfig"
source "drivers/vfio/fsl-mc/Kconfig"
+endif
+
source "virt/lib/Kconfig"
diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig
index b1a527d6b6f2..597d338c5c8a 100644
--- a/drivers/vfio/fsl-mc/Kconfig
+++ b/drivers/vfio/fsl-mc/Kconfig
@@ -1,6 +1,7 @@
config VFIO_FSL_MC
tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
- depends on VFIO && FSL_MC_BUS && EVENTFD
+ depends on FSL_MC_BUS
+ select EVENTFD
help
Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
(Management Complex) devices. This is required to passthrough
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 90cad109583b..0ead91bfa838 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -19,81 +19,10 @@
static struct fsl_mc_driver vfio_fsl_mc_driver;
-static DEFINE_MUTEX(reflck_lock);
-
-static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
-{
- kref_get(&reflck->kref);
-}
-
-static void vfio_fsl_mc_reflck_release(struct kref *kref)
-{
- struct vfio_fsl_mc_reflck *reflck = container_of(kref,
- struct vfio_fsl_mc_reflck,
- kref);
-
- mutex_destroy(&reflck->lock);
- kfree(reflck);
- mutex_unlock(&reflck_lock);
-}
-
-static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
-{
- kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
-}
-
-static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
-{
- struct vfio_fsl_mc_reflck *reflck;
-
- reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
- if (!reflck)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&reflck->kref);
- mutex_init(&reflck->lock);
-
- return reflck;
-}
-
-static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
-{
- int ret = 0;
-
- mutex_lock(&reflck_lock);
- if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
- vdev->reflck = vfio_fsl_mc_reflck_alloc();
- ret = PTR_ERR_OR_ZERO(vdev->reflck);
- } else {
- struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
- struct vfio_device *device;
- struct vfio_fsl_mc_device *cont_vdev;
-
- device = vfio_device_get_from_dev(mc_cont_dev);
- if (!device) {
- ret = -ENODEV;
- goto unlock;
- }
-
- cont_vdev =
- container_of(device, struct vfio_fsl_mc_device, vdev);
- if (!cont_vdev || !cont_vdev->reflck) {
- vfio_device_put(device);
- ret = -ENODEV;
- goto unlock;
- }
- vfio_fsl_mc_reflck_get(cont_vdev->reflck);
- vdev->reflck = cont_vdev->reflck;
- vfio_device_put(device);
- }
-
-unlock:
- mutex_unlock(&reflck_lock);
- return ret;
-}
-
-static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
+static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
{
+ struct vfio_fsl_mc_device *vdev =
+ container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
struct fsl_mc_device *mc_dev = vdev->mc_dev;
int count = mc_dev->obj_desc.region_count;
int i;
@@ -136,58 +65,30 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
kfree(vdev->regions);
}
-static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
-{
- struct vfio_fsl_mc_device *vdev =
- container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
- int ret = 0;
-
- mutex_lock(&vdev->reflck->lock);
- if (!vdev->refcnt) {
- ret = vfio_fsl_mc_regions_init(vdev);
- if (ret)
- goto out;
- }
- vdev->refcnt++;
-out:
- mutex_unlock(&vdev->reflck->lock);
-
- return ret;
-}
-static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
+static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{
struct vfio_fsl_mc_device *vdev =
container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
+ struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
int ret;
- mutex_lock(&vdev->reflck->lock);
+ vfio_fsl_mc_regions_cleanup(vdev);
- if (!(--vdev->refcnt)) {
- struct fsl_mc_device *mc_dev = vdev->mc_dev;
- struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
- struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
+ /* reset the device before cleaning up the interrupts */
+ ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
+ mc_cont->obj_desc.id,
+ DPRC_RESET_OPTION_NON_RECURSIVE);
- vfio_fsl_mc_regions_cleanup(vdev);
+ if (WARN_ON(ret))
+ dev_warn(&mc_cont->dev,
+ "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
- /* reset the device before cleaning up the interrupts */
- ret = dprc_reset_container(mc_cont->mc_io, 0,
- mc_cont->mc_handle,
- mc_cont->obj_desc.id,
- DPRC_RESET_OPTION_NON_RECURSIVE);
+ vfio_fsl_mc_irqs_cleanup(vdev);
- if (ret) {
- dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
- ret);
- WARN_ON(1);
- }
-
- vfio_fsl_mc_irqs_cleanup(vdev);
-
- fsl_mc_cleanup_irq_pool(mc_cont);
- }
-
- mutex_unlock(&vdev->reflck->lock);
+ fsl_mc_cleanup_irq_pool(mc_cont);
}
static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
@@ -504,8 +405,8 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
static const struct vfio_device_ops vfio_fsl_mc_ops = {
.name = "vfio-fsl-mc",
- .open = vfio_fsl_mc_open,
- .release = vfio_fsl_mc_release,
+ .open_device = vfio_fsl_mc_open_device,
+ .close_device = vfio_fsl_mc_close_device,
.ioctl = vfio_fsl_mc_ioctl,
.read = vfio_fsl_mc_read,
.write = vfio_fsl_mc_write,
@@ -625,13 +526,16 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
vdev->mc_dev = mc_dev;
mutex_init(&vdev->igate);
- ret = vfio_fsl_mc_reflck_attach(vdev);
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ ret = vfio_assign_device_set(&vdev->vdev, &mc_dev->dev);
+ else
+ ret = vfio_assign_device_set(&vdev->vdev, mc_dev->dev.parent);
if (ret)
- goto out_kfree;
+ goto out_uninit;
ret = vfio_fsl_mc_init_device(vdev);
if (ret)
- goto out_reflck;
+ goto out_uninit;
ret = vfio_register_group_dev(&vdev->vdev);
if (ret) {
@@ -639,12 +543,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
goto out_device;
}
- /*
- * This triggers recursion into vfio_fsl_mc_probe() on another device
- * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
- * vfio_add_group_dev() above. It has no impact on this vdev, so it is
- * safe to be after the vfio device is made live.
- */
ret = vfio_fsl_mc_scan_container(mc_dev);
if (ret)
goto out_group_dev;
@@ -655,9 +553,8 @@ out_group_dev:
vfio_unregister_group_dev(&vdev->vdev);
out_device:
vfio_fsl_uninit_device(vdev);
-out_reflck:
- vfio_fsl_mc_reflck_put(vdev->reflck);
-out_kfree:
+out_uninit:
+ vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev);
out_group_put:
vfio_iommu_group_put(group, dev);
@@ -674,8 +571,8 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
dprc_remove_devices(mc_dev, NULL, 0);
vfio_fsl_uninit_device(vdev);
- vfio_fsl_mc_reflck_put(vdev->reflck);
+ vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev);
vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
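
The conversion above replaces the driver-local, kref-counted reflck with
the VFIO core's device sets: each device is assigned to a set keyed on an
arbitrary pointer, and the set's mutex provides the serialization the
reflck used to. A hedged sketch of the pattern for a hypothetical driver
("foo" names are illustrative):

#include <linux/mutex.h>
#include <linux/vfio.h>

static int foo_vfio_probe_one(struct vfio_device *vdev, struct device *dev)
{
        int ret;

        /* Devices passing the same key land in the same refcounted set. */
        ret = vfio_assign_device_set(vdev, dev->parent);
        if (ret)
                return ret;

        return vfio_register_group_dev(vdev);
}

static void foo_vfio_serialized_op(struct vfio_device *vdev)
{
        /* dev_set->lock replaces the old hand-rolled reflck mutex. */
        mutex_lock(&vdev->dev_set->lock);
        /* ... touch state shared by every device in the set ... */
        mutex_unlock(&vdev->dev_set->lock);
}
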
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 0d9f3002df7f..77e584093a23 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -120,7 +120,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
if (start != 0 || count != 1)
return -EINVAL;
- mutex_lock(&vdev->reflck->lock);
+ mutex_lock(&vdev->vdev.dev_set->lock);
ret = fsl_mc_populate_irq_pool(mc_cont,
FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
if (ret)
@@ -129,7 +129,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
ret = vfio_fsl_mc_irqs_allocate(vdev);
if (ret)
goto unlock;
- mutex_unlock(&vdev->reflck->lock);
+ mutex_unlock(&vdev->vdev.dev_set->lock);
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
s32 fd = *(s32 *)data;
@@ -154,7 +154,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return 0;
unlock:
- mutex_unlock(&vdev->reflck->lock);
+ mutex_unlock(&vdev->vdev.dev_set->lock);
return ret;
}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 89700e00e77d..4ad63ececb91 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -22,11 +22,6 @@ struct vfio_fsl_mc_irq {
char *name;
};
-struct vfio_fsl_mc_reflck {
- struct kref kref;
- struct mutex lock;
-};
-
struct vfio_fsl_mc_region {
u32 flags;
u32 type;
@@ -39,9 +34,7 @@ struct vfio_fsl_mc_device {
struct vfio_device vdev;
struct fsl_mc_device *mc_dev;
struct notifier_block nb;
- int refcnt;
struct vfio_fsl_mc_region *regions;
- struct vfio_fsl_mc_reflck *reflck;
struct mutex igate;
struct vfio_fsl_mc_irq *mc_irqs;
};
diff --git a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig
index 763c877a1318..646dbed44eb2 100644
--- a/drivers/vfio/mdev/Kconfig
+++ b/drivers/vfio/mdev/Kconfig
@@ -2,7 +2,6 @@
config VFIO_MDEV
tristate "Mediated device driver framework"
- depends on VFIO
default n
help
Provides a framework to virtualize devices.
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index e4581ec093a6..b314101237fe 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -138,10 +138,6 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
if (!dev)
return -EINVAL;
- /* Not mandatory, but its absence could be a problem */
- if (!ops->request)
- dev_info(dev, "Driver cannot be asked to release device\n");
-
mutex_lock(&parent_list_lock);
/* Check for duplicate */
@@ -398,7 +394,7 @@ static void __exit mdev_exit(void)
mdev_bus_unregister();
}
-module_init(mdev_init)
+subsys_initcall(mdev_init)
module_exit(mdev_exit)
MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index 39ef7489fe47..7a9883048216 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -17,24 +17,24 @@
#include "mdev_private.h"
-static int vfio_mdev_open(struct vfio_device *core_vdev)
+static int vfio_mdev_open_device(struct vfio_device *core_vdev)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
- if (unlikely(!parent->ops->open))
- return -EINVAL;
+ if (unlikely(!parent->ops->open_device))
+ return 0;
- return parent->ops->open(mdev);
+ return parent->ops->open_device(mdev);
}
-static void vfio_mdev_release(struct vfio_device *core_vdev)
+static void vfio_mdev_close_device(struct vfio_device *core_vdev)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
- if (likely(parent->ops->release))
- parent->ops->release(mdev);
+ if (likely(parent->ops->close_device))
+ parent->ops->close_device(mdev);
}
static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
@@ -44,7 +44,7 @@ static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->ioctl))
- return -EINVAL;
+ return 0;
return parent->ops->ioctl(mdev, cmd, arg);
}
@@ -100,8 +100,8 @@ static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
static const struct vfio_device_ops vfio_mdev_dev_ops = {
.name = "vfio-mdev",
- .open = vfio_mdev_open,
- .release = vfio_mdev_release,
+ .open_device = vfio_mdev_open_device,
+ .close_device = vfio_mdev_close_device,
.ioctl = vfio_mdev_unlocked_ioctl,
.read = vfio_mdev_read,
.write = vfio_mdev_write,
@@ -120,12 +120,16 @@ static int vfio_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
ret = vfio_register_group_dev(vdev);
- if (ret) {
- kfree(vdev);
- return ret;
- }
+ if (ret)
+ goto out_uninit;
+
dev_set_drvdata(&mdev->dev, vdev);
return 0;
+
+out_uninit:
+ vfio_uninit_group_dev(vdev);
+ kfree(vdev);
+ return ret;
}
static void vfio_mdev_remove(struct mdev_device *mdev)
@@ -133,6 +137,7 @@ static void vfio_mdev_remove(struct mdev_device *mdev)
struct vfio_device *vdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(vdev);
+ vfio_uninit_group_dev(vdev);
kfree(vdev);
}
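
The mdev hunks above rename the parent callbacks from open/release to
open_device/close_device and make both optional; they now run only while
userspace actually has the device open. A hedged sketch of a parent
driver's ops after the rename ("foo" names are illustrative):

#include <linux/mdev.h>
#include <linux/module.h>

static int foo_mdev_open_device(struct mdev_device *mdev)
{
        /* set up per-open hardware state */
        return 0;
}

static void foo_mdev_close_device(struct mdev_device *mdev)
{
        /* tear down per-open hardware state */
}

static const struct mdev_parent_ops foo_mdev_ops = {
        .owner = THIS_MODULE,
        .open_device = foo_mdev_open_device,
        .close_device = foo_mdev_close_device,
        /* .supported_type_groups, .create, .remove etc. elided */
};
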
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 5e2e1b9a9fd3..860424ccda1b 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -1,19 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
-config VFIO_PCI
- tristate "VFIO support for PCI devices"
- depends on VFIO && PCI && EVENTFD
- depends on MMU
+if PCI && MMU
+config VFIO_PCI_CORE
+ tristate
select VFIO_VIRQFD
select IRQ_BYPASS_MANAGER
+
+config VFIO_PCI_MMAP
+ def_bool y if !S390
+
+config VFIO_PCI_INTX
+ def_bool y if !S390
+
+config VFIO_PCI
+ tristate "Generic VFIO support for any PCI device"
+ select VFIO_PCI_CORE
help
- Support for the PCI VFIO bus driver. This is required to make
- use of PCI drivers using the VFIO framework.
+ Support for the generic PCI VFIO bus driver which can connect any
+ PCI device to the VFIO framework.
If you don't know what to do here, say N.
+if VFIO_PCI
config VFIO_PCI_VGA
- bool "VFIO PCI support for VGA devices"
- depends on VFIO_PCI && X86 && VGA_ARB
+ bool "Generic VFIO PCI support for VGA devices"
+ depends on X86 && VGA_ARB
help
Support for VGA extension to VFIO PCI. This exposes an additional
region on VGA devices for accessing legacy VGA addresses used by
@@ -21,17 +31,9 @@ config VFIO_PCI_VGA
If you don't know what to do here, say N.
-config VFIO_PCI_MMAP
- depends on VFIO_PCI
- def_bool y if !S390
-
-config VFIO_PCI_INTX
- depends on VFIO_PCI
- def_bool y if !S390
-
config VFIO_PCI_IGD
- bool "VFIO PCI extensions for Intel graphics (GVT-d)"
- depends on VFIO_PCI && X86
+ bool "Generic VFIO PCI extensions for Intel graphics (GVT-d)"
+ depends on X86
default y
help
Support for Intel IGD specific extensions to enable direct
@@ -40,3 +42,5 @@ config VFIO_PCI_IGD
and LPC bridge config space.
To enable Intel IGD assignment through vfio-pci, say Y.
+endif
+endif
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 3ff42093962f..349d68d242b4 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
-vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
-vfio-pci-$(CONFIG_S390) += vfio_pci_zdev.o
+vfio-pci-core-y := vfio_pci_core.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
+vfio-pci-core-$(CONFIG_S390) += vfio_pci_zdev.o
+obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o
+vfio-pci-y := vfio_pci.o
+vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cf27df8048db..a5ce92beb655 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
+ * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
@@ -18,19 +20,13 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
-#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <linux/vfio.h>
-#include <linux/vgaarb.h>
-#include <linux/nospec.h>
-#include <linux/sched/mm.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
-#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
@@ -64,15 +60,6 @@ static bool disable_denylist;
module_param(disable_denylist, bool, 0444);
MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
-static inline bool vfio_vga_disabled(void)
-{
-#ifdef CONFIG_VFIO_PCI_VGA
- return disable_vga;
-#else
- return true;
-#endif
-}
-
static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
{
switch (pdev->vendor) {
@@ -111,2269 +98,103 @@ static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
return true;
}
-/*
- * Our VGA arbiter participation is limited since we don't know anything
- * about the device itself. However, if the device is the only VGA device
- * downstream of a bridge and VFIO VGA support is disabled, then we can
- * safely return legacy VGA IO and memory as not decoded since the user
- * has no way to get to it and routing can be disabled externally at the
- * bridge.
- */
-static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
-{
- struct pci_dev *tmp = NULL;
- unsigned char max_busnr;
- unsigned int decodes;
-
- if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
- VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
-
- max_busnr = pci_bus_max_busnr(pdev->bus);
- decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-
- while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
- if (tmp == pdev ||
- pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
- pci_is_root_bus(tmp->bus))
- continue;
-
- if (tmp->bus->number >= pdev->bus->number &&
- tmp->bus->number <= max_busnr) {
- pci_dev_put(tmp);
- decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
- break;
- }
- }
-
- return decodes;
-}
-
-static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
-{
- return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
-}
-
-static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
-{
- struct resource *res;
- int i;
- struct vfio_pci_dummy_resource *dummy_res;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- int bar = i + PCI_STD_RESOURCES;
-
- res = &vdev->pdev->resource[bar];
-
- if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
- goto no_mmap;
-
- if (!(res->flags & IORESOURCE_MEM))
- goto no_mmap;
-
- /*
- * The PCI core shouldn't set up a resource with a
- * type but zero size. But there may be bugs that
- * cause us to do that.
- */
- if (!resource_size(res))
- goto no_mmap;
-
- if (resource_size(res) >= PAGE_SIZE) {
- vdev->bar_mmap_supported[bar] = true;
- continue;
- }
-
- if (!(res->start & ~PAGE_MASK)) {
- /*
- * Add a dummy resource to reserve the remainder
- * of the exclusive page in case that hot-add
- * device's bar is assigned into it.
- */
- dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
- if (dummy_res == NULL)
- goto no_mmap;
-
- dummy_res->resource.name = "vfio sub-page reserved";
- dummy_res->resource.start = res->end + 1;
- dummy_res->resource.end = res->start + PAGE_SIZE - 1;
- dummy_res->resource.flags = res->flags;
- if (request_resource(res->parent,
- &dummy_res->resource)) {
- kfree(dummy_res);
- goto no_mmap;
- }
- dummy_res->index = bar;
- list_add(&dummy_res->res_next,
- &vdev->dummy_resources_list);
- vdev->bar_mmap_supported[bar] = true;
- continue;
- }
- /*
- * Here we don't handle the case when the BAR is not page
- * aligned because we can't expect the BAR will be
- * assigned into the same location in a page in guest
- * when we passthrough the BAR. And it's hard to access
- * this BAR in userspace because we have no way to get
- * the BAR's location in a page.
- */
-no_mmap:
- vdev->bar_mmap_supported[bar] = false;
- }
-}
-
-static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
-static void vfio_pci_disable(struct vfio_pci_device *vdev);
-static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
-
-/*
- * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
- * _and_ the ability detect when the device is asserting INTx via PCI_STATUS.
- * If a device implements the former but not the latter we would typically
- * expect broken_intx_masking be set and require an exclusive interrupt.
- * However since we do have control of the device's ability to assert INTx,
- * we can instead pretend that the device does not implement INTx, virtualizing
- * the pin register to report zero and maintaining DisINTx set on the host.
- */
-static bool vfio_pci_nointx(struct pci_dev *pdev)
-{
- switch (pdev->vendor) {
- case PCI_VENDOR_ID_INTEL:
- switch (pdev->device) {
- /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
- case 0x1572:
- case 0x1574:
- case 0x1580 ... 0x1581:
- case 0x1583 ... 0x158b:
- case 0x37d0 ... 0x37d2:
- /* X550 */
- case 0x1563:
- return true;
- default:
- return false;
- }
- }
-
- return false;
-}
-
-static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
+static int vfio_pci_open_device(struct vfio_device *core_vdev)
{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
struct pci_dev *pdev = vdev->pdev;
- u16 pmcsr;
-
- if (!pdev->pm_cap)
- return;
-
- pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
-
- vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
-}
-
-/*
- * pci_set_power_state() wrapper handling devices which perform a soft reset on
- * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
- * restore when returned to D0. Saved separately from pci_saved_state for use
- * by PM capability emulation and separately from pci_dev internal saved state
- * to avoid it being overwritten and consumed around other resets.
- */
-int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
-{
- struct pci_dev *pdev = vdev->pdev;
- bool needs_restore = false, needs_save = false;
int ret;
- if (vdev->needs_pm_restore) {
- if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
- pci_save_state(pdev);
- needs_save = true;
- }
-
- if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
- needs_restore = true;
- }
-
- ret = pci_set_power_state(pdev, state);
-
- if (!ret) {
- /* D3 might be unsupported via quirk, skip unless in D3 */
- if (needs_save && pdev->current_state >= PCI_D3hot) {
- vdev->pm_save = pci_store_saved_state(pdev);
- } else if (needs_restore) {
- pci_load_and_free_saved_state(pdev, &vdev->pm_save);
- pci_restore_state(pdev);
- }
- }
-
- return ret;
-}
-
-static int vfio_pci_enable(struct vfio_pci_device *vdev)
-{
- struct pci_dev *pdev = vdev->pdev;
- int ret;
- u16 cmd;
- u8 msix_pos;
-
- vfio_pci_set_power_state(vdev, PCI_D0);
-
- /* Don't allow our initial saved state to include busmaster */
- pci_clear_master(pdev);
-
- ret = pci_enable_device(pdev);
+ ret = vfio_pci_core_enable(vdev);
if (ret)
return ret;
- /* If reset fails because of the device lock, fail this path entirely */
- ret = pci_try_reset_function(pdev);
- if (ret == -EAGAIN) {
- pci_disable_device(pdev);
- return ret;
- }
-
- vdev->reset_works = !ret;
- pci_save_state(pdev);
- vdev->pci_saved_state = pci_store_saved_state(pdev);
- if (!vdev->pci_saved_state)
- pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
-
- if (likely(!nointxmask)) {
- if (vfio_pci_nointx(pdev)) {
- pci_info(pdev, "Masking broken INTx support\n");
- vdev->nointx = true;
- pci_intx(pdev, 0);
- } else
- vdev->pci_2_3 = pci_intx_mask_supported(pdev);
- }
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
- cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- ret = vfio_config_init(vdev);
- if (ret) {
- kfree(vdev->pci_saved_state);
- vdev->pci_saved_state = NULL;
- pci_disable_device(pdev);
- return ret;
- }
-
- msix_pos = pdev->msix_cap;
- if (msix_pos) {
- u16 flags;
- u32 table;
-
- pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
- pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
-
- vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
- vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
- vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
- } else
- vdev->msix_bar = 0xFF;
-
- if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
- vdev->has_vga = true;
-
if (vfio_pci_is_vga(pdev) &&
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup Intel IGD regions\n");
- goto disable_exit;
- }
- }
-
- vfio_pci_probe_mmaps(vdev);
-
- return 0;
-
-disable_exit:
- vfio_pci_disable(vdev);
- return ret;
-}
-
-static void vfio_pci_disable(struct vfio_pci_device *vdev)
-{
- struct pci_dev *pdev = vdev->pdev;
- struct vfio_pci_dummy_resource *dummy_res, *tmp;
- struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
- int i, bar;
-
- /* Stop the device from further DMA */
- pci_clear_master(pdev);
-
- vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
- VFIO_IRQ_SET_ACTION_TRIGGER,
- vdev->irq_type, 0, 0, NULL);
-
- /* Device closed, don't need mutex here */
- list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
- &vdev->ioeventfds_list, next) {
- vfio_virqfd_disable(&ioeventfd->virqfd);
- list_del(&ioeventfd->next);
- kfree(ioeventfd);
- }
- vdev->ioeventfds_nr = 0;
-
- vdev->virq_disabled = false;
-
- for (i = 0; i < vdev->num_regions; i++)
- vdev->region[i].ops->release(vdev, &vdev->region[i]);
-
- vdev->num_regions = 0;
- kfree(vdev->region);
- vdev->region = NULL; /* don't krealloc a freed pointer */
-
- vfio_config_free(vdev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- bar = i + PCI_STD_RESOURCES;
- if (!vdev->barmap[bar])
- continue;
- pci_iounmap(pdev, vdev->barmap[bar]);
- pci_release_selected_regions(pdev, 1 << bar);
- vdev->barmap[bar] = NULL;
- }
-
- list_for_each_entry_safe(dummy_res, tmp,
- &vdev->dummy_resources_list, res_next) {
- list_del(&dummy_res->res_next);
- release_resource(&dummy_res->resource);
- kfree(dummy_res);
- }
-
- vdev->needs_reset = true;
-
- /*
- * If we have saved state, restore it. If we can reset the device,
- * even better. Resetting with current state seems better than
- * nothing, but saving and restoring current state without reset
- * is just busy work.
- */
- if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
- pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
-
- if (!vdev->reset_works)
- goto out;
-
- pci_save_state(pdev);
- }
-
- /*
- * Disable INTx and MSI, presumably to avoid spurious interrupts
- * during reset. Stolen from pci_reset_function()
- */
- pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
-
- /*
- * Try to get the locks ourselves to prevent a deadlock. The
- * success of this is dependent on being able to lock the device,
- * which is not always possible.
- * We cannot use the "try" reset interface here, which will
- * overwrite the previously restored configuration information.
- */
- if (vdev->reset_works && pci_dev_trylock(pdev)) {
- if (!__pci_reset_function_locked(pdev))
- vdev->needs_reset = false;
- pci_dev_unlock(pdev);
- }
-
- pci_restore_state(pdev);
-out:
- pci_disable_device(pdev);
-
- vfio_pci_try_bus_reset(vdev);
-
- if (!disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D3hot);
-}
-
-static struct pci_driver vfio_pci_driver;
-
-static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev)
-{
- struct pci_dev *physfn = pci_physfn(vdev->pdev);
- struct vfio_device *pf_dev;
-
- if (!vdev->pdev->is_virtfn)
- return NULL;
-
- pf_dev = vfio_device_get_from_dev(&physfn->dev);
- if (!pf_dev)
- return NULL;
-
- if (pci_dev_driver(physfn) != &vfio_pci_driver) {
- vfio_device_put(pf_dev);
- return NULL;
- }
-
- return container_of(pf_dev, struct vfio_pci_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
-{
- struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev);
-
- if (!pf_vdev)
- return;
-
- mutex_lock(&pf_vdev->vf_token->lock);
- pf_vdev->vf_token->users += val;
- WARN_ON(pf_vdev->vf_token->users < 0);
- mutex_unlock(&pf_vdev->vf_token->lock);
-
- vfio_device_put(&pf_vdev->vdev);
-}
-
-static void vfio_pci_release(struct vfio_device *core_vdev)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
-
- mutex_lock(&vdev->reflck->lock);
-
- if (!(--vdev->refcnt)) {
- vfio_pci_vf_token_user_add(vdev, -1);
- vfio_spapr_pci_eeh_release(vdev->pdev);
- vfio_pci_disable(vdev);
-
- mutex_lock(&vdev->igate);
- if (vdev->err_trigger) {
- eventfd_ctx_put(vdev->err_trigger);
- vdev->err_trigger = NULL;
- }
- if (vdev->req_trigger) {
- eventfd_ctx_put(vdev->req_trigger);
- vdev->req_trigger = NULL;
- }
- mutex_unlock(&vdev->igate);
- }
-
- mutex_unlock(&vdev->reflck->lock);
-}
-
-static int vfio_pci_open(struct vfio_device *core_vdev)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
- int ret = 0;
-
- mutex_lock(&vdev->reflck->lock);
-
- if (!vdev->refcnt) {
- ret = vfio_pci_enable(vdev);
- if (ret)
- goto error;
-
- vfio_spapr_pci_eeh_open(vdev->pdev);
- vfio_pci_vf_token_user_add(vdev, 1);
- }
- vdev->refcnt++;
-error:
- mutex_unlock(&vdev->reflck->lock);
- return ret;
-}
-
-static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
-{
- if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
- u8 pin;
-
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
- vdev->nointx || vdev->pdev->is_virtfn)
- return 0;
-
- pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
-
- return pin ? 1 : 0;
- } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
- u8 pos;
- u16 flags;
-
- pos = vdev->pdev->msi_cap;
- if (pos) {
- pci_read_config_word(vdev->pdev,
- pos + PCI_MSI_FLAGS, &flags);
- return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
- }
- } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
- u8 pos;
- u16 flags;
-
- pos = vdev->pdev->msix_cap;
- if (pos) {
- pci_read_config_word(vdev->pdev,
- pos + PCI_MSIX_FLAGS, &flags);
-
- return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
- }
- } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
- if (pci_is_pcie(vdev->pdev))
- return 1;
- } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
- return 1;
- }
-
- return 0;
-}
-
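As a worked example of the MSI decode above (the flags value is illustrative): a Multiple Message Capable field of 3 advertises 1 << 3 = 8 vectors:

	u16 flags = 0x0086;	/* illustrative: 64-bit capable, MMC field = 3 */
	int vectors = 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);	/* == 8 */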
-static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
-{
- (*(int *)data)++;
- return 0;
-}
-
-struct vfio_pci_fill_info {
- int max;
- int cur;
- struct vfio_pci_dependent_device *devices;
-};
-
-static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
-{
- struct vfio_pci_fill_info *fill = data;
- struct iommu_group *iommu_group;
-
- if (fill->cur == fill->max)
- return -EAGAIN; /* Something changed, try again */
-
- iommu_group = iommu_group_get(&pdev->dev);
- if (!iommu_group)
- return -EPERM; /* Cannot reset non-isolated devices */
-
- fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
- fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
- fill->devices[fill->cur].bus = pdev->bus->number;
- fill->devices[fill->cur].devfn = pdev->devfn;
- fill->cur++;
- iommu_group_put(iommu_group);
- return 0;
-}
-
-struct vfio_pci_group_entry {
- struct vfio_group *group;
- int id;
-};
-
-struct vfio_pci_group_info {
- int count;
- struct vfio_pci_group_entry *groups;
-};
-
-static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
-{
- struct vfio_pci_group_info *info = data;
- struct iommu_group *group;
- int id, i;
-
- group = iommu_group_get(&pdev->dev);
- if (!group)
- return -EPERM;
-
- id = iommu_group_id(group);
-
- for (i = 0; i < info->count; i++)
- if (info->groups[i].id == id)
- break;
-
- iommu_group_put(group);
-
- return (i == info->count) ? -EINVAL : 0;
-}
-
-static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
-{
- for (; pdev; pdev = pdev->bus->self)
- if (pdev->bus == slot->bus)
- return (pdev->slot == slot);
- return false;
-}
-
-struct vfio_pci_walk_info {
- int (*fn)(struct pci_dev *, void *data);
- void *data;
- struct pci_dev *pdev;
- bool slot;
- int ret;
-};
-
-static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
-{
- struct vfio_pci_walk_info *walk = data;
-
- if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
- walk->ret = walk->fn(pdev, walk->data);
-
- return walk->ret;
-}
-
-static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
- int (*fn)(struct pci_dev *,
- void *data), void *data,
- bool slot)
-{
- struct vfio_pci_walk_info walk = {
- .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
- };
-
- pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
-
- return walk.ret;
-}
-
-static int msix_mmappable_cap(struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
-{
- struct vfio_info_cap_header header = {
- .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
- .version = 1
- };
-
- return vfio_info_add_capability(caps, &header, sizeof(header));
-}
-
-int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data)
-{
- struct vfio_pci_region *region;
-
- region = krealloc(vdev->region,
- (vdev->num_regions + 1) * sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
-
- vdev->region = region;
- vdev->region[vdev->num_regions].type = type;
- vdev->region[vdev->num_regions].subtype = subtype;
- vdev->region[vdev->num_regions].ops = ops;
- vdev->region[vdev->num_regions].size = size;
- vdev->region[vdev->num_regions].flags = flags;
- vdev->region[vdev->num_regions].data = data;
-
- vdev->num_regions++;
-
- return 0;
-}
-
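Device-specific regions registered here (the IGD OpRegion, for instance) appear to userspace after the fixed VFIO_PCI_NUM_REGIONS indexes. A minimal registration sketch, where my_regops and my_data are hypothetical placeholders for a real implementation:

	ret = vfio_pci_register_dev_region(vdev,
			VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&my_regops, size, VFIO_REGION_INFO_FLAG_READ, my_data);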
-struct vfio_devices {
- struct vfio_pci_device **devices;
- int cur_index;
- int max_index;
-};
-
-static long vfio_pci_ioctl(struct vfio_device *core_vdev,
- unsigned int cmd, unsigned long arg)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
- unsigned long minsz;
-
- if (cmd == VFIO_DEVICE_GET_INFO) {
- struct vfio_device_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
- int ret;
-
- minsz = offsetofend(struct vfio_device_info, num_irqs);
-
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
-
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (info.argsz < minsz)
- return -EINVAL;
-
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0;
- }
-
- info.flags = VFIO_DEVICE_FLAGS_PCI;
-
- if (vdev->reset_works)
- info.flags |= VFIO_DEVICE_FLAGS_RESET;
-
- info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
- info.num_irqs = VFIO_PCI_NUM_IRQS;
-
- ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
- if (ret && ret != -ENODEV) {
- pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
- return ret;
- }
-
- if (caps.size) {
- info.flags |= VFIO_DEVICE_FLAGS_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
- }
-
- kfree(caps.buf);
- }
-
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
-
- } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
- struct pci_dev *pdev = vdev->pdev;
- struct vfio_region_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
-
- minsz = offsetofend(struct vfio_region_info, offset);
-
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (info.argsz < minsz)
- return -EINVAL;
-
- switch (info.index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pdev->cfg_size;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- break;
- case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- info.flags = 0;
- break;
- }
-
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- if (vdev->bar_mmap_supported[info.index]) {
- info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
- if (info.index == vdev->msix_bar) {
- ret = msix_mmappable_cap(vdev, &caps);
- if (ret)
- return ret;
- }
- }
-
- break;
- case VFIO_PCI_ROM_REGION_INDEX:
- {
- void __iomem *io;
- size_t size;
- u16 cmd;
-
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.flags = 0;
-
- /* Report the BAR size, not the ROM size */
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- /* Shadow ROMs appear as PCI option ROMs */
- if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW)
- info.size = 0x20000;
- else
- break;
- }
-
- /*
- * Is it really there? Enable memory decode for
- * implicit access in pci_map_rom().
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- io = pci_map_rom(pdev, &size);
- if (io) {
- info.flags = VFIO_REGION_INFO_FLAG_READ;
- pci_unmap_rom(pdev, io);
- } else {
- info.size = 0;
- }
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
-
- break;
- }
- case VFIO_PCI_VGA_REGION_INDEX:
- if (!vdev->has_vga)
- return -EINVAL;
-
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0xc0000;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
-
- break;
- default:
- {
- struct vfio_region_info_cap_type cap_type = {
- .header.id = VFIO_REGION_INFO_CAP_TYPE,
- .header.version = 1 };
-
- if (info.index >=
- VFIO_PCI_NUM_REGIONS + vdev->num_regions)
- return -EINVAL;
- info.index = array_index_nospec(info.index,
- VFIO_PCI_NUM_REGIONS +
- vdev->num_regions);
-
- i = info.index - VFIO_PCI_NUM_REGIONS;
-
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vdev->region[i].size;
- info.flags = vdev->region[i].flags;
-
- cap_type.type = vdev->region[i].type;
- cap_type.subtype = vdev->region[i].subtype;
-
- ret = vfio_info_add_capability(&caps, &cap_type.header,
- sizeof(cap_type));
- if (ret)
- return ret;
-
- if (vdev->region[i].ops->add_capability) {
- ret = vdev->region[i].ops->add_capability(vdev,
- &vdev->region[i], &caps);
- if (ret)
- return ret;
- }
- }
- }
-
- if (caps.size) {
- info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- info.cap_offset = 0;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
- }
-
- kfree(caps.buf);
- }
-
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
-
- } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
- struct vfio_irq_info info;
-
- minsz = offsetofend(struct vfio_irq_info, count);
-
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
- return -EINVAL;
-
- switch (info.index) {
- case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
- case VFIO_PCI_REQ_IRQ_INDEX:
- break;
- case VFIO_PCI_ERR_IRQ_INDEX:
- if (pci_is_pcie(vdev->pdev))
- break;
- fallthrough;
- default:
- return -EINVAL;
- }
-
- info.flags = VFIO_IRQ_INFO_EVENTFD;
-
- info.count = vfio_pci_get_irq_count(vdev, info.index);
-
- if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
- info.flags |= (VFIO_IRQ_INFO_MASKABLE |
- VFIO_IRQ_INFO_AUTOMASKED);
- else
- info.flags |= VFIO_IRQ_INFO_NORESIZE;
-
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
-
- } else if (cmd == VFIO_DEVICE_SET_IRQS) {
- struct vfio_irq_set hdr;
- u8 *data = NULL;
- int max, ret = 0;
- size_t data_size = 0;
-
- minsz = offsetofend(struct vfio_irq_set, count);
-
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
-
- max = vfio_pci_get_irq_count(vdev, hdr.index);
-
- ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
- VFIO_PCI_NUM_IRQS, &data_size);
- if (ret)
+ vfio_pci_core_disable(vdev);
return ret;
-
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
}
-
- mutex_lock(&vdev->igate);
-
- ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
- hdr.start, hdr.count, data);
-
- mutex_unlock(&vdev->igate);
- kfree(data);
-
- return ret;
-
- } else if (cmd == VFIO_DEVICE_RESET) {
- int ret;
-
- if (!vdev->reset_works)
- return -EINVAL;
-
- vfio_pci_zap_and_down_write_memory_lock(vdev);
- ret = pci_try_reset_function(vdev->pdev);
- up_write(&vdev->memory_lock);
-
- return ret;
-
- } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
- struct vfio_pci_hot_reset_info hdr;
- struct vfio_pci_fill_info fill = { 0 };
- struct vfio_pci_dependent_device *devices = NULL;
- bool slot = false;
- int ret = 0;
-
- minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
-
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (hdr.argsz < minsz)
- return -EINVAL;
-
- hdr.flags = 0;
-
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
-
- /* How many devices are affected? */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &fill.max, slot);
- if (ret)
- return ret;
-
- WARN_ON(!fill.max); /* Should always be at least one */
-
- /*
- * If there's enough space, fill it now, otherwise return
- * -ENOSPC and the number of devices affected.
- */
- if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
- ret = -ENOSPC;
- hdr.count = fill.max;
- goto reset_info_exit;
- }
-
- devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
-
- fill.devices = devices;
-
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_fill_devs,
- &fill, slot);
-
- /*
- * If a device was removed between counting and filling,
- * we may come up short of fill.max. If a device was
- * added, we'll have a return of -EAGAIN above.
- */
- if (!ret)
- hdr.count = fill.cur;
-
-reset_info_exit:
- if (copy_to_user((void __user *)arg, &hdr, minsz))
- ret = -EFAULT;
-
- if (!ret) {
- if (copy_to_user((void __user *)(arg + minsz), devices,
- hdr.count * sizeof(*devices)))
- ret = -EFAULT;
- }
-
- kfree(devices);
- return ret;
-
- } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
- struct vfio_pci_hot_reset hdr;
- int32_t *group_fds;
- struct vfio_pci_group_entry *groups;
- struct vfio_pci_group_info info;
- struct vfio_devices devs = { .cur_index = 0 };
- bool slot = false;
- int i, group_idx, mem_idx = 0, count = 0, ret = 0;
-
- minsz = offsetofend(struct vfio_pci_hot_reset, count);
-
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (hdr.argsz < minsz || hdr.flags)
- return -EINVAL;
-
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
-
- /*
- * We can't let userspace give us an arbitrarily large
- * buffer to copy, so verify how many we think there
- * could be. Note groups can have multiple devices so
- * one group per device is the max.
- */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &count, slot);
- if (ret)
- return ret;
-
- /* Somewhere between 1 and count is OK */
- if (!hdr.count || hdr.count > count)
- return -EINVAL;
-
- group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
- if (!group_fds || !groups) {
- kfree(group_fds);
- kfree(groups);
- return -ENOMEM;
- }
-
- if (copy_from_user(group_fds, (void __user *)(arg + minsz),
- hdr.count * sizeof(*group_fds))) {
- kfree(group_fds);
- kfree(groups);
- return -EFAULT;
- }
-
- /*
- * For each group_fd, get the group through the vfio external
- * user interface and store the group and iommu ID. This
- * ensures the group is held across the reset.
- */
- for (group_idx = 0; group_idx < hdr.count; group_idx++) {
- struct vfio_group *group;
- struct fd f = fdget(group_fds[group_idx]);
- if (!f.file) {
- ret = -EBADF;
- break;
- }
-
- group = vfio_group_get_external_user(f.file);
- fdput(f);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- break;
- }
-
- groups[group_idx].group = group;
- groups[group_idx].id =
- vfio_external_user_iommu_id(group);
- }
-
- kfree(group_fds);
-
- /* release reference to groups on error */
- if (ret)
- goto hot_reset_release;
-
- info.count = hdr.count;
- info.groups = groups;
-
- /*
- * Test whether all the affected devices are contained
- * by the set of groups provided by the user.
- */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_validate_devs,
- &info, slot);
- if (ret)
- goto hot_reset_release;
-
- devs.max_index = count;
- devs.devices = kcalloc(count, sizeof(struct vfio_device *),
- GFP_KERNEL);
- if (!devs.devices) {
- ret = -ENOMEM;
- goto hot_reset_release;
- }
-
- /*
- * We need to get memory_lock for each device, but devices
- * can share mmap_lock, therefore we need to zap and hold
- * the vma_lock for each device, and only then get each
- * memory_lock.
- */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_try_zap_and_vma_lock_cb,
- &devs, slot);
- if (ret)
- goto hot_reset_release;
-
- for (; mem_idx < devs.cur_index; mem_idx++) {
- struct vfio_pci_device *tmp = devs.devices[mem_idx];
-
- ret = down_write_trylock(&tmp->memory_lock);
- if (!ret) {
- ret = -EBUSY;
- goto hot_reset_release;
- }
- mutex_unlock(&tmp->vma_lock);
- }
-
- /* User has access, do the reset */
- ret = pci_reset_bus(vdev->pdev);
-
-hot_reset_release:
- for (i = 0; i < devs.cur_index; i++) {
- struct vfio_pci_device *tmp = devs.devices[i];
-
- if (i < mem_idx)
- up_write(&tmp->memory_lock);
- else
- mutex_unlock(&tmp->vma_lock);
- vfio_device_put(&tmp->vdev);
- }
- kfree(devs.devices);
-
- for (group_idx--; group_idx >= 0; group_idx--)
- vfio_group_put_external_user(groups[group_idx].group);
-
- kfree(groups);
- return ret;
- } else if (cmd == VFIO_DEVICE_IOEVENTFD) {
- struct vfio_device_ioeventfd ioeventfd;
- int count;
-
- minsz = offsetofend(struct vfio_device_ioeventfd, fd);
-
- if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (ioeventfd.argsz < minsz)
- return -EINVAL;
-
- if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
- return -EINVAL;
-
- count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
-
- if (hweight8(count) != 1 || ioeventfd.fd < -1)
- return -EINVAL;
-
- return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
- ioeventfd.data, count, ioeventfd.fd);
- } else if (cmd == VFIO_DEVICE_FEATURE) {
- struct vfio_device_feature feature;
- uuid_t uuid;
-
- minsz = offsetofend(struct vfio_device_feature, flags);
-
- if (copy_from_user(&feature, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (feature.argsz < minsz)
- return -EINVAL;
-
- /* Check unknown flags */
- if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
- VFIO_DEVICE_FEATURE_SET |
- VFIO_DEVICE_FEATURE_GET |
- VFIO_DEVICE_FEATURE_PROBE))
- return -EINVAL;
-
- /* GET & SET are mutually exclusive except with PROBE */
- if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
- (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
- (feature.flags & VFIO_DEVICE_FEATURE_GET))
- return -EINVAL;
-
- switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
- case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
- if (!vdev->vf_token)
- return -ENOTTY;
-
- /*
- * We do not support GET of the VF Token UUID as this
- * could expose the token of the previous device user.
- */
- if (feature.flags & VFIO_DEVICE_FEATURE_GET)
- return -EINVAL;
-
- if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
- return 0;
-
- /* Don't SET unless told to do so */
- if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
- return -EINVAL;
-
- if (feature.argsz < minsz + sizeof(uuid))
- return -EINVAL;
-
- if (copy_from_user(&uuid, (void __user *)(arg + minsz),
- sizeof(uuid)))
- return -EFAULT;
-
- mutex_lock(&vdev->vf_token->lock);
- uuid_copy(&vdev->vf_token->uuid, &uuid);
- mutex_unlock(&vdev->vf_token->lock);
-
- return 0;
- default:
- return -ENOTTY;
- }
- }
-
- return -ENOTTY;
-}
-
-static ssize_t vfio_pci_rw(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
-{
- unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
- return -EINVAL;
-
- switch (index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
-
- case VFIO_PCI_ROM_REGION_INDEX:
- if (iswrite)
- return -EINVAL;
- return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
-
- case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
-
- case VFIO_PCI_VGA_REGION_INDEX:
- return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
- default:
- index -= VFIO_PCI_NUM_REGIONS;
- return vdev->region[index].ops->rw(vdev, buf,
- count, ppos, iswrite);
- }
-
- return -EINVAL;
-}
-
-static ssize_t vfio_pci_read(struct vfio_device *core_vdev, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
-
- if (!count)
- return 0;
-
- return vfio_pci_rw(vdev, buf, count, ppos, false);
-}
-
-static ssize_t vfio_pci_write(struct vfio_device *core_vdev, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
-
- if (!count)
- return 0;
-
- return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
-}
-
-/* Return 1 with vmas zapped and vma_lock acquired, 0 on contention (only with @try) */
-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
-{
- struct vfio_pci_mmap_vma *mmap_vma, *tmp;
-
- /*
- * Lock ordering:
- * vma_lock is nested under mmap_lock for vm_ops callback paths.
- * The memory_lock semaphore is used by both code paths calling
- * into this function to zap vmas and the vm_ops.fault callback
- * to protect the memory enable state of the device.
- *
- * When zapping vmas we need to maintain the mmap_lock => vma_lock
- * ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_lock and
- * reacquiring vma_lock. This logic is derived from similar
- * requirements in uverbs_user_mmap_disassociate().
- *
- * mmap_lock must always be the top-level lock when it is taken.
- * Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_lock to clear
- * entries. vma_list can only be guaranteed empty when holding
- * vma_lock, thus memory_lock is nested under vma_lock.
- *
- * This enables the vm_ops.fault callback to acquire vma_lock,
- * followed by memory_lock read lock, while already holding
- * mmap_lock without risk of deadlock.
- */
- while (1) {
- struct mm_struct *mm = NULL;
-
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock))
- return 0;
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- while (!list_empty(&vdev->vma_list)) {
- mmap_vma = list_first_entry(&vdev->vma_list,
- struct vfio_pci_mmap_vma,
- vma_next);
- mm = mmap_vma->vma->vm_mm;
- if (mmget_not_zero(mm))
- break;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- mm = NULL;
- }
- if (!mm)
- return 1;
- mutex_unlock(&vdev->vma_lock);
-
- if (try) {
- if (!mmap_read_trylock(mm)) {
- mmput(mm);
- return 0;
- }
- } else {
- mmap_read_lock(mm);
- }
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock)) {
- mmap_read_unlock(mm);
- mmput(mm);
- return 0;
- }
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- list_for_each_entry_safe(mmap_vma, tmp,
- &vdev->vma_list, vma_next) {
- struct vm_area_struct *vma = mmap_vma->vma;
-
- if (vma->vm_mm != mm)
- continue;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
-
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&vdev->vma_lock);
- mmap_read_unlock(mm);
- mmput(mm);
- }
-}
-
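Condensed, the ordering the comment above establishes is:

	/*
	 *   mmap_lock (per-mm, outermost)
	 *     -> vma_lock (protects vdev->vma_list)
	 *       -> memory_lock (read in fault path, write while zapping)
	 */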
-void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
-{
- vfio_pci_zap_and_vma_lock(vdev, false);
- down_write(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
-}
-
-u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
-{
- u16 cmd;
-
- down_write(&vdev->memory_lock);
- pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
- if (!(cmd & PCI_COMMAND_MEMORY))
- pci_write_config_word(vdev->pdev, PCI_COMMAND,
- cmd | PCI_COMMAND_MEMORY);
-
- return cmd;
-}
-
-void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
-{
- pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
- up_write(&vdev->memory_lock);
-}
-
-/* Caller holds vma_lock */
-static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
- struct vm_area_struct *vma)
-{
- struct vfio_pci_mmap_vma *mmap_vma;
-
- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
- if (!mmap_vma)
- return -ENOMEM;
-
- mmap_vma->vma = vma;
- list_add(&mmap_vma->vma_next, &vdev->vma_list);
-
- return 0;
-}
-
-/*
- * Zap mmaps on open so that we can fault them in on access and therefore
- * our vma_list only tracks mappings accessed since last zap.
- */
-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-{
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
-{
- struct vfio_pci_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
-
- mutex_lock(&vdev->vma_lock);
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma) {
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- break;
- }
- }
- mutex_unlock(&vdev->vma_lock);
-}
-
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct vfio_pci_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
- vm_fault_t ret = VM_FAULT_NOPAGE;
-
- mutex_lock(&vdev->vma_lock);
- down_read(&vdev->memory_lock);
-
- if (!__vfio_pci_memory_enabled(vdev)) {
- ret = VM_FAULT_SIGBUS;
- goto up_out;
- }
-
- /*
- * We populate the whole vma on fault, so we need to test whether
- * the vma has already been mapped, such as for concurrent faults
- * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
- * we ask it to fill the same range again.
- */
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma)
- goto up_out;
- }
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- ret = VM_FAULT_SIGBUS;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- goto up_out;
- }
-
- if (__vfio_pci_add_vma(vdev, vma)) {
- ret = VM_FAULT_OOM;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- }
-
-up_out:
- up_read(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
- return ret;
-}
-
-static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .open = vfio_pci_mmap_open,
- .close = vfio_pci_mmap_close,
- .fault = vfio_pci_mmap_fault,
-};
-
-static int vfio_pci_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
- struct pci_dev *pdev = vdev->pdev;
- unsigned int index;
- u64 phys_len, req_len, pgoff, req_start;
- int ret;
-
- index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
-
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
- return -EINVAL;
- if (vma->vm_end < vma->vm_start)
- return -EINVAL;
- if ((vma->vm_flags & VM_SHARED) == 0)
- return -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS) {
- int regnum = index - VFIO_PCI_NUM_REGIONS;
- struct vfio_pci_region *region = vdev->region + regnum;
-
- if (region->ops && region->ops->mmap &&
- (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
- return region->ops->mmap(vdev, region, vma);
- return -EINVAL;
}
- if (index >= VFIO_PCI_ROM_REGION_INDEX)
- return -EINVAL;
- if (!vdev->bar_mmap_supported[index])
- return -EINVAL;
- phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
- req_len = vma->vm_end - vma->vm_start;
- pgoff = vma->vm_pgoff &
- ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
- req_start = pgoff << PAGE_SHIFT;
-
- if (req_start + req_len > phys_len)
- return -EINVAL;
-
- /*
- * Even though we don't make use of the barmap for the mmap,
- * we need to request the region and the barmap tracks that.
- */
- if (!vdev->barmap[index]) {
- ret = pci_request_selected_regions(pdev,
- 1 << index, "vfio-pci");
- if (ret)
- return ret;
-
- vdev->barmap[index] = pci_iomap(pdev, index, 0);
- if (!vdev->barmap[index]) {
- pci_release_selected_regions(pdev, 1 << index);
- return -ENOMEM;
- }
- }
-
- vma->vm_private_data = vdev;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
-
- /*
- * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we can't
- * change vm_flags within the fault handler. Set them now.
- */
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &vfio_pci_mmap_ops;
+ vfio_pci_core_finish_enable(vdev);
return 0;
}
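From userspace this path is driven by mmap() on the device fd at the region's reported offset. A minimal sketch, assuming device_fd is an open VFIO device fd and BAR0 is MMIO:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/vfio.h>

	struct vfio_region_info reg = {
		.argsz = sizeof(reg),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};
	void *bar0 = MAP_FAILED;

	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (reg.flags & VFIO_REGION_INFO_FLAG_MMAP)
		bar0 = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, device_fd, reg.offset);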
-static void vfio_pci_request(struct vfio_device *core_vdev, unsigned int count)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
- struct pci_dev *pdev = vdev->pdev;
-
- mutex_lock(&vdev->igate);
-
- if (vdev->req_trigger) {
- if (!(count % 10))
- pci_notice_ratelimited(pdev,
- "Relaying device request to user (#%u)\n",
- count);
- eventfd_signal(vdev->req_trigger, 1);
- } else if (count == 0) {
- pci_warn(pdev,
- "No device request channel registered, blocked until released by user\n");
- }
-
- mutex_unlock(&vdev->igate);
-}
-
-static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
- bool vf_token, uuid_t *uuid)
-{
- /*
- * There's always some degree of trust or collaboration between SR-IOV
- * PF and VFs, even if just that the PF hosts the SR-IOV capability and
- * can disrupt VFs with a reset, but often the PF has more explicit
- * access to deny service to the VF or access data passed through the
- * VF. We therefore require an opt-in via a shared VF token (UUID) to
- * represent this trust. This both prevents a VF driver from assuming
- * the PF driver is a trusted, in-kernel driver, and guards against the
- * PF driver being replaced with a rogue driver, unknown to in-use
- * VF drivers.
- *
- * Therefore when presented with a VF, if the PF is a vfio device and
- * it is bound to the vfio-pci driver, the user needs to provide a VF
- * token to access the device, in the form of appending a vf_token to
- * the device name, for example:
- *
- * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
- *
- * When presented with a PF which has VFs in use, the user must also
- * provide the current VF token to prove collaboration with existing
- * VF users. If VFs are not in use, the VF token provided for the PF
- * device will act to set the VF token.
- *
- * If the VF token is provided but unused, an error is generated.
- */
- if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
- return 0; /* No VF token provided or required */
-
- if (vdev->pdev->is_virtfn) {
- struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev);
- bool match;
-
- if (!pf_vdev) {
- if (!vf_token)
- return 0; /* PF is not vfio-pci, no VF token */
-
- pci_info_ratelimited(vdev->pdev,
- "VF token incorrectly provided, PF not bound to vfio-pci\n");
- return -EINVAL;
- }
-
- if (!vf_token) {
- vfio_device_put(&pf_vdev->vdev);
- pci_info_ratelimited(vdev->pdev,
- "VF token required to access device\n");
- return -EACCES;
- }
-
- mutex_lock(&pf_vdev->vf_token->lock);
- match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
- mutex_unlock(&pf_vdev->vf_token->lock);
-
- vfio_device_put(&pf_vdev->vdev);
-
- if (!match) {
- pci_info_ratelimited(vdev->pdev,
- "Incorrect VF token provided for device\n");
- return -EACCES;
- }
- } else if (vdev->vf_token) {
- mutex_lock(&vdev->vf_token->lock);
- if (vdev->vf_token->users) {
- if (!vf_token) {
- mutex_unlock(&vdev->vf_token->lock);
- pci_info_ratelimited(vdev->pdev,
- "VF token required to access device\n");
- return -EACCES;
- }
-
- if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
- mutex_unlock(&vdev->vf_token->lock);
- pci_info_ratelimited(vdev->pdev,
- "Incorrect VF token provided for device\n");
- return -EACCES;
- }
- } else if (vf_token) {
- uuid_copy(&vdev->vf_token->uuid, uuid);
- }
-
- mutex_unlock(&vdev->vf_token->lock);
- } else if (vf_token) {
- pci_info_ratelimited(vdev->pdev,
- "VF token incorrectly provided, not a PF or VF\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-#define VF_TOKEN_ARG "vf_token="
-
-static int vfio_pci_match(struct vfio_device *core_vdev, char *buf)
-{
- struct vfio_pci_device *vdev =
- container_of(core_vdev, struct vfio_pci_device, vdev);
- bool vf_token = false;
- uuid_t uuid;
- int ret;
-
- if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
- return 0; /* No match */
-
- if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
- buf += strlen(pci_name(vdev->pdev));
-
- if (*buf != ' ')
- return 0; /* No match: non-whitespace after name */
-
- while (*buf) {
- if (*buf == ' ') {
- buf++;
- continue;
- }
-
- if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
- strlen(VF_TOKEN_ARG))) {
- buf += strlen(VF_TOKEN_ARG);
-
- if (strlen(buf) < UUID_STRING_LEN)
- return -EINVAL;
-
- ret = uuid_parse(buf, &uuid);
- if (ret)
- return ret;
-
- vf_token = true;
- buf += UUID_STRING_LEN;
- } else {
- /* Unknown/duplicate option */
- return -EINVAL;
- }
- }
- }
-
- ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
- if (ret)
- return ret;
-
- return 1; /* Match */
-}
-
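The buf parsed here is the string userspace hands to VFIO_GROUP_GET_DEVICE_FD, so opening a VF with a token looks like the following (group_fd assumed already open, UUID taken from the example in the comment above):

	int device_fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD,
			"0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3");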
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
- .open = vfio_pci_open,
- .release = vfio_pci_release,
- .ioctl = vfio_pci_ioctl,
- .read = vfio_pci_read,
- .write = vfio_pci_write,
- .mmap = vfio_pci_mmap,
- .request = vfio_pci_request,
- .match = vfio_pci_match,
+ .open_device = vfio_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
};
-static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
-static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
-
-static int vfio_pci_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct vfio_pci_device *vdev = container_of(nb,
- struct vfio_pci_device, nb);
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_dev *physfn = pci_physfn(pdev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE &&
- pdev->is_virtfn && physfn == vdev->pdev) {
- pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
- pci_name(pdev));
- pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
- vfio_pci_ops.name);
- } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
- pdev->is_virtfn && physfn == vdev->pdev) {
- struct pci_driver *drv = pci_dev_driver(pdev);
-
- if (drv && drv != &vfio_pci_driver)
- pci_warn(vdev->pdev,
- "VF %s bound to driver %s while PF bound to vfio-pci\n",
- pci_name(pdev), drv->name);
- }
-
- return 0;
-}
-
-static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
-{
- struct pci_dev *pdev = vdev->pdev;
- int ret;
-
- if (!pdev->is_physfn)
- return 0;
-
- vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
- if (!vdev->vf_token)
- return -ENOMEM;
-
- mutex_init(&vdev->vf_token->lock);
- uuid_gen(&vdev->vf_token->uuid);
-
- vdev->nb.notifier_call = vfio_pci_bus_notifier;
- ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
- if (ret) {
- kfree(vdev->vf_token);
- return ret;
- }
- return 0;
-}
-
-static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
-{
- if (!vdev->vf_token)
- return;
-
- bus_unregister_notifier(&pci_bus_type, &vdev->nb);
- WARN_ON(vdev->vf_token->users);
- mutex_destroy(&vdev->vf_token->lock);
- kfree(vdev->vf_token);
-}
-
-static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
-{
- struct pci_dev *pdev = vdev->pdev;
- int ret;
-
- if (!vfio_pci_is_vga(pdev))
- return 0;
-
- ret = vga_client_register(pdev, vfio_pci_set_decode);
- if (ret)
- return ret;
- vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
- return 0;
-}
-
-static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
-{
- struct pci_dev *pdev = vdev->pdev;
-
- if (!vfio_pci_is_vga(pdev))
- return;
- vga_client_unregister(pdev);
- vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
- VGA_RSRC_LEGACY_IO |
- VGA_RSRC_LEGACY_MEM);
-}
-
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct vfio_pci_device *vdev;
- struct iommu_group *group;
+ struct vfio_pci_core_device *vdev;
int ret;
if (vfio_pci_is_denylisted(pdev))
return -EINVAL;
- if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
- return -EINVAL;
-
- /*
- * Prevent binding to PFs with VFs enabled, the VFs might be in use
- * by the host or other users. We cannot capture the VFs if they
- * already exist, nor can we track VF users. Disabling SR-IOV here
- * would initiate removing the VFs, which would unbind the driver,
- * which is prone to blocking if that VF is also in use by vfio-pci.
- * Just reject these PFs and let the user sort it out.
- */
- if (pci_num_vf(pdev)) {
- pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
- return -EBUSY;
- }
-
- group = vfio_iommu_group_get(&pdev->dev);
- if (!group)
- return -EINVAL;
-
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev) {
- ret = -ENOMEM;
- goto out_group_put;
- }
-
- vfio_init_group_dev(&vdev->vdev, &pdev->dev, &vfio_pci_ops);
- vdev->pdev = pdev;
- vdev->irq_type = VFIO_PCI_NUM_IRQS;
- mutex_init(&vdev->igate);
- spin_lock_init(&vdev->irqlock);
- mutex_init(&vdev->ioeventfds_lock);
- INIT_LIST_HEAD(&vdev->dummy_resources_list);
- INIT_LIST_HEAD(&vdev->ioeventfds_list);
- mutex_init(&vdev->vma_lock);
- INIT_LIST_HEAD(&vdev->vma_list);
- init_rwsem(&vdev->memory_lock);
+ if (!vdev)
+ return -ENOMEM;
+ vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops);
- ret = vfio_pci_reflck_attach(vdev);
+ ret = vfio_pci_core_register_device(vdev);
if (ret)
goto out_free;
- ret = vfio_pci_vf_init(vdev);
- if (ret)
- goto out_reflck;
- ret = vfio_pci_vga_init(vdev);
- if (ret)
- goto out_vf;
-
- vfio_pci_probe_power_state(vdev);
-
- if (!disable_idle_d3) {
- /*
- * pci-core sets the device power state to an unknown value at
- * bootup and after being removed from a driver. The only
- * transition it allows from this unknown state is to D0, which
- * typically happens when a driver calls pci_enable_device().
- * We're not ready to enable the device yet, but we do want to
- * be able to get to D3. Therefore first do a D0 transition
- * before going to D3.
- */
- vfio_pci_set_power_state(vdev, PCI_D0);
- vfio_pci_set_power_state(vdev, PCI_D3hot);
- }
-
- ret = vfio_register_group_dev(&vdev->vdev);
- if (ret)
- goto out_power;
dev_set_drvdata(&pdev->dev, vdev);
return 0;
-out_power:
- if (!disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D0);
-out_vf:
- vfio_pci_vf_uninit(vdev);
-out_reflck:
- vfio_pci_reflck_put(vdev->reflck);
out_free:
- kfree(vdev->pm_save);
+ vfio_pci_core_uninit_device(vdev);
kfree(vdev);
-out_group_put:
- vfio_iommu_group_put(group, &pdev->dev);
return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
- struct vfio_pci_device *vdev = dev_get_drvdata(&pdev->dev);
-
- pci_disable_sriov(pdev);
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
- vfio_unregister_group_dev(&vdev->vdev);
-
- vfio_pci_vf_uninit(vdev);
- vfio_pci_reflck_put(vdev->reflck);
- vfio_pci_vga_uninit(vdev);
-
- vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
-
- if (!disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D0);
-
- mutex_destroy(&vdev->ioeventfds_lock);
- kfree(vdev->region);
- kfree(vdev->pm_save);
+ vfio_pci_core_unregister_device(vdev);
+ vfio_pci_core_uninit_device(vdev);
kfree(vdev);
}
-static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct vfio_pci_device *vdev;
- struct vfio_device *device;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (device == NULL)
- return PCI_ERS_RESULT_DISCONNECT;
-
- vdev = container_of(device, struct vfio_pci_device, vdev);
-
- mutex_lock(&vdev->igate);
-
- if (vdev->err_trigger)
- eventfd_signal(vdev->err_trigger, 1);
-
- mutex_unlock(&vdev->igate);
-
- vfio_device_put(device);
-
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
- struct vfio_device *device;
- int ret = 0;
-
- might_sleep();
-
if (!enable_sriov)
return -ENOENT;
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return -ENODEV;
-
- if (nr_virtfn == 0)
- pci_disable_sriov(pdev);
- else
- ret = pci_enable_sriov(pdev, nr_virtfn);
-
- vfio_device_put(device);
-
- return ret < 0 ? ret : nr_virtfn;
+ return vfio_pci_core_sriov_configure(pdev, nr_virtfn);
}
-static const struct pci_error_handlers vfio_err_handlers = {
- .error_detected = vfio_pci_aer_err_detected,
+static const struct pci_device_id vfio_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_ANY_ID, PCI_ANY_ID) }, /* match all by default */
+ {}
};
+MODULE_DEVICE_TABLE(pci, vfio_pci_table);
+
static struct pci_driver vfio_pci_driver = {
.name = "vfio-pci",
- .id_table = NULL, /* only dynamic ids */
+ .id_table = vfio_pci_table,
.probe = vfio_pci_probe,
.remove = vfio_pci_remove,
.sriov_configure = vfio_pci_sriov_configure,
- .err_handler = &vfio_err_handlers,
+ .err_handler = &vfio_pci_core_err_handlers,
};
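Because the new id table entries use PCI_DRIVER_OVERRIDE_DEVICE_VFIO, matching all devices does not auto-bind anything; binding still requires an explicit driver_override. A sketch of that from userspace (device address is illustrative, error handling omitted):

	#include <stdio.h>

	FILE *f = fopen("/sys/bus/pci/devices/0000:04:10.0/driver_override", "w");
	fputs("vfio-pci", f);
	fclose(f);

	f = fopen("/sys/bus/pci/drivers_probe", "w");
	fputs("0000:04:10.0", f);
	fclose(f);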
-static DEFINE_MUTEX(reflck_lock);
-
-static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
-{
- struct vfio_pci_reflck *reflck;
-
- reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
- if (!reflck)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&reflck->kref);
- mutex_init(&reflck->lock);
-
- return reflck;
-}
-
-static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
-{
- kref_get(&reflck->kref);
-}
-
-static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
-{
- struct vfio_pci_reflck **preflck = data;
- struct vfio_device *device;
- struct vfio_pci_device *vdev;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return 0;
-
- if (pci_dev_driver(pdev) != &vfio_pci_driver) {
- vfio_device_put(device);
- return 0;
- }
-
- vdev = container_of(device, struct vfio_pci_device, vdev);
-
- if (vdev->reflck) {
- vfio_pci_reflck_get(vdev->reflck);
- *preflck = vdev->reflck;
- vfio_device_put(device);
- return 1;
- }
-
- vfio_device_put(device);
- return 0;
-}
-
-static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
-{
- bool slot = !pci_probe_reset_slot(vdev->pdev->slot);
-
- mutex_lock(&reflck_lock);
-
- if (pci_is_root_bus(vdev->pdev->bus) ||
- vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
- &vdev->reflck, slot) <= 0)
- vdev->reflck = vfio_pci_reflck_alloc();
-
- mutex_unlock(&reflck_lock);
-
- return PTR_ERR_OR_ZERO(vdev->reflck);
-}
-
-static void vfio_pci_reflck_release(struct kref *kref)
-{
- struct vfio_pci_reflck *reflck = container_of(kref,
- struct vfio_pci_reflck,
- kref);
-
- kfree(reflck);
- mutex_unlock(&reflck_lock);
-}
-
-static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
-{
- kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
-}
-
-static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
-{
- struct vfio_devices *devs = data;
- struct vfio_device *device;
- struct vfio_pci_device *vdev;
-
- if (devs->cur_index == devs->max_index)
- return -ENOSPC;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return -EINVAL;
-
- if (pci_dev_driver(pdev) != &vfio_pci_driver) {
- vfio_device_put(device);
- return -EBUSY;
- }
-
- vdev = container_of(device, struct vfio_pci_device, vdev);
-
- /* Fault if the device is not unused */
- if (vdev->refcnt) {
- vfio_device_put(device);
- return -EBUSY;
- }
-
- devs->devices[devs->cur_index++] = vdev;
- return 0;
-}
-
-static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
-{
- struct vfio_devices *devs = data;
- struct vfio_device *device;
- struct vfio_pci_device *vdev;
-
- if (devs->cur_index == devs->max_index)
- return -ENOSPC;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return -EINVAL;
-
- if (pci_dev_driver(pdev) != &vfio_pci_driver) {
- vfio_device_put(device);
- return -EBUSY;
- }
-
- vdev = container_of(device, struct vfio_pci_device, vdev);
-
- /*
- * Locking multiple devices is prone to deadlock, so back off and
- * unwind if we hit contention.
- */
- if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
- vfio_device_put(device);
- return -EBUSY;
- }
-
- devs->devices[devs->cur_index++] = vdev;
- return 0;
-}
-
-/*
- * If a bus or slot reset is available for the provided device and:
- * - All of the devices affected by that bus or slot reset are unused
- * (!refcnt)
- * - At least one of the affected devices is marked dirty via
- * needs_reset (such as by lack of FLR support)
- * Then attempt to perform that bus or slot reset. Callers are required
- * to hold vdev->reflck->lock, protecting the bus/slot reset group from
- * concurrent opens. A vfio_device reference is acquired for each device
- * to prevent unbinds during the reset operation.
- *
- * NB: vfio-core considers a group to be viable even if some devices are
- * bound to drivers like pci-stub or pcieport. Here we require all devices
- * to be bound to vfio_pci since that's the only way we can be sure they
- * stay put.
- */
-static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
-{
- struct vfio_devices devs = { .cur_index = 0 };
- int i = 0, ret = -EINVAL;
- bool slot = false;
- struct vfio_pci_device *tmp;
-
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return;
-
- if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
- &i, slot) || !i)
- return;
-
- devs.max_index = i;
- devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
- if (!devs.devices)
- return;
-
- if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_get_unused_devs,
- &devs, slot))
- goto put_devs;
-
- /* Does at least one need a reset? */
- for (i = 0; i < devs.cur_index; i++) {
- tmp = devs.devices[i];
- if (tmp->needs_reset) {
- ret = pci_reset_bus(vdev->pdev);
- break;
- }
- }
-
-put_devs:
- for (i = 0; i < devs.cur_index; i++) {
- tmp = devs.devices[i];
-
- /*
- * If reset was successful, affected devices no longer need
- * a reset and we should return all the collateral devices
- * to low power. If not successful, we either didn't reset
- * the bus or timed out waiting for it, so let's not touch
- * the power state.
- */
- if (!ret) {
- tmp->needs_reset = false;
-
- if (tmp != vdev && !disable_idle_d3)
- vfio_pci_set_power_state(tmp, PCI_D3hot);
- }
-
- vfio_device_put(&tmp->vdev);
- }
-
- kfree(devs.devices);
-}
-
-static void __exit vfio_pci_cleanup(void)
-{
- pci_unregister_driver(&vfio_pci_driver);
- vfio_pci_uninit_perm_bits();
-}
-
static void __init vfio_pci_fill_ids(void)
{
char *p, *id;
@@ -2418,16 +239,18 @@ static void __init vfio_pci_fill_ids(void)
static int __init vfio_pci_init(void)
{
int ret;
+ bool is_disable_vga = true;
- /* Allocate shared config space permission data used by all devices */
- ret = vfio_pci_init_perm_bits();
- if (ret)
- return ret;
+#ifdef CONFIG_VFIO_PCI_VGA
+ is_disable_vga = disable_vga;
+#endif
+
+ vfio_pci_core_set_params(nointxmask, is_disable_vga, disable_idle_d3);
/* Register and scan for devices */
ret = pci_register_driver(&vfio_pci_driver);
if (ret)
- goto out_driver;
+ return ret;
vfio_pci_fill_ids();
@@ -2435,16 +258,15 @@ static int __init vfio_pci_init(void)
pr_warn("device denylist disabled.\n");
return 0;
-
-out_driver:
- vfio_pci_uninit_perm_bits();
- return ret;
}
-
module_init(vfio_pci_init);
+
+static void __exit vfio_pci_cleanup(void)
+{
+ pci_unregister_driver(&vfio_pci_driver);
+}
module_exit(vfio_pci_cleanup);
-MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 70e28efbc51f..6e58b4bf7a60 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -26,7 +26,7 @@
#include <linux/vfio.h>
#include <linux/slab.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
@@ -108,9 +108,9 @@ static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
struct perm_bits {
u8 *virt; /* read/write virtual data, not hw */
u8 *write; /* writeable bits */
- int (*readfn)(struct vfio_pci_device *vdev, int pos, int count,
+ int (*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
struct perm_bits *perm, int offset, __le32 *val);
- int (*writefn)(struct vfio_pci_device *vdev, int pos, int count,
+ int (*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
struct perm_bits *perm, int offset, __le32 val);
};
@@ -171,7 +171,7 @@ static int vfio_user_config_write(struct pci_dev *pdev, int offset,
return ret;
}
-static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -197,7 +197,7 @@ static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
return count;
}
-static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -244,7 +244,7 @@ static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
}
/* Allow direct read from hardware, except for capability next pointer */
-static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -269,7 +269,7 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
}
/* Raw access skips any kind of virtualization */
-static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -282,7 +282,7 @@ static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
return count;
}
-static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -296,7 +296,7 @@ static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
}
/* Virt access uses only virtualization */
-static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -304,7 +304,7 @@ static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
return count;
}
-static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -396,7 +396,7 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
}
/* Caller should hold memory_lock semaphore */
-bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
@@ -413,7 +413,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
* Restore the *real* BARs after we detect a FLR or backdoor reset.
* (backdoor = some device specific technique that we didn't catch)
*/
-static void vfio_bar_restore(struct vfio_pci_device *vdev)
+static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u32 *rbar = vdev->rbar;
@@ -460,7 +460,7 @@ static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
* Pretend we're hardware and tweak the values of the *virtual* PCI BARs
* to reflect the hardware capabilities. This implements BAR sizing.
*/
-static void vfio_bar_fixup(struct vfio_pci_device *vdev)
+static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int i;
@@ -514,7 +514,7 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
vdev->bardirty = false;
}
-static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -536,7 +536,7 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
}
/* Test whether BARs match the value we think they should contain */
-static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
+static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
{
int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
u32 bar;
@@ -552,7 +552,7 @@ static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
return false;
}
-static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -692,7 +692,7 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
return 0;
}
-static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -747,7 +747,7 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
return 0;
}
-static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -829,7 +829,7 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
return 0;
}
-static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -913,7 +913,7 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
return 0;
}
-static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -1072,7 +1072,7 @@ int __init vfio_pci_init_perm_bits(void)
return ret;
}
-static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
+static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
{
u8 cap;
int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
@@ -1089,7 +1089,7 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
return pos;
}
-static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
+static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 *val)
{
@@ -1109,7 +1109,7 @@ static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
return vfio_default_config_read(vdev, pos, count, perm, offset, val);
}
-static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
+static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
@@ -1189,7 +1189,7 @@ static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
}
/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
-static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
+static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
{
struct pci_dev *pdev = vdev->pdev;
int len, ret;
@@ -1222,7 +1222,7 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
}
/* Determine extended capability length for VC (2 & 9) and MFVC */
-static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
+static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
{
struct pci_dev *pdev = vdev->pdev;
u32 tmp;
@@ -1263,7 +1263,7 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
return len;
}
-static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
+static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
{
struct pci_dev *pdev = vdev->pdev;
u32 dword;
@@ -1338,7 +1338,7 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return 0;
}
-static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
+static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
{
struct pci_dev *pdev = vdev->pdev;
u8 byte;
@@ -1412,7 +1412,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
return 0;
}
-static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
+static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
int offset, int size)
{
struct pci_dev *pdev = vdev->pdev;
@@ -1459,7 +1459,7 @@ static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
return ret;
}
-static int vfio_cap_init(struct vfio_pci_device *vdev)
+static int vfio_cap_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u8 *map = vdev->pci_config_map;
@@ -1549,7 +1549,7 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
return 0;
}
-static int vfio_ecap_init(struct vfio_pci_device *vdev)
+static int vfio_ecap_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u8 *map = vdev->pci_config_map;
@@ -1669,7 +1669,7 @@ static const struct pci_device_id known_bogus_vf_intx_pin[] = {
* for each area requiring emulated bits, but the array of pointers
* would be comparable in size (at least for standard config space).
*/
-int vfio_config_init(struct vfio_pci_device *vdev)
+int vfio_config_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u8 *map, *vconfig;
@@ -1773,7 +1773,7 @@ out:
return pcibios_err_to_errno(ret);
}
-void vfio_config_free(struct vfio_pci_device *vdev)
+void vfio_config_free(struct vfio_pci_core_device *vdev)
{
kfree(vdev->vconfig);
vdev->vconfig = NULL;
@@ -1790,7 +1790,7 @@ void vfio_config_free(struct vfio_pci_device *vdev)
* Find the remaining number of bytes in a dword that match the given
* position. Stop at either the end of the capability or the dword boundary.
*/
-static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
+static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev,
loff_t pos)
{
u8 cap = vdev->pci_config_map[pos];
@@ -1802,7 +1802,7 @@ static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
return i;
}
-static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
+static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
@@ -1885,7 +1885,7 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
return ret;
}
-ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
size_t done = 0;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
new file mode 100644
index 000000000000..68198e0f2a63
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -0,0 +1,2157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vgaarb.h>
+#include <linux/nospec.h>
+#include <linux/sched/mm.h>
+
+#include <linux/vfio_pci_core.h>
+
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "core driver for VFIO based PCI devices"
+
+static bool nointxmask;
+static bool disable_vga;
+static bool disable_idle_d3;
+
+static inline bool vfio_vga_disabled(void)
+{
+#ifdef CONFIG_VFIO_PCI_VGA
+ return disable_vga;
+#else
+ return true;
+#endif
+}
+
+/*
+ * Our VGA arbiter participation is limited since we don't know anything
+ * about the device itself. However, if the device is the only VGA device
+ * downstream of a bridge and VFIO VGA support is disabled, then we can
+ * safely return legacy VGA IO and memory as not decoded since the user
+ * has no way to get to it and routing can be disabled externally at the
+ * bridge.
+ */
+static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
+{
+ struct pci_dev *tmp = NULL;
+ unsigned char max_busnr;
+ unsigned int decodes;
+
+ if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+
+ max_busnr = pci_bus_max_busnr(pdev->bus);
+ decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+ while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
+ if (tmp == pdev ||
+ pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
+ pci_is_root_bus(tmp->bus))
+ continue;
+
+ if (tmp->bus->number >= pdev->bus->number &&
+ tmp->bus->number <= max_busnr) {
+ pci_dev_put(tmp);
+ decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ break;
+ }
+ }
+
+ return decodes;
+}
+
+static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
+{
+ struct resource *res;
+ int i;
+ struct vfio_pci_dummy_resource *dummy_res;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ res = &vdev->pdev->resource[bar];
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
+ goto no_mmap;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ goto no_mmap;
+
+ /*
+ * The PCI core shouldn't set up a resource with a
+ * type but zero size. But there may be bugs that
+ * cause us to do that.
+ */
+ if (!resource_size(res))
+ goto no_mmap;
+
+ if (resource_size(res) >= PAGE_SIZE) {
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+
+ if (!(res->start & ~PAGE_MASK)) {
+ /*
+			 * Add a dummy resource to reserve the remainder
+			 * of the exclusive page so that a hot-added
+			 * device's BAR cannot be assigned into it.
+ */
+ dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
+ if (dummy_res == NULL)
+ goto no_mmap;
+
+ dummy_res->resource.name = "vfio sub-page reserved";
+ dummy_res->resource.start = res->end + 1;
+ dummy_res->resource.end = res->start + PAGE_SIZE - 1;
+ dummy_res->resource.flags = res->flags;
+ if (request_resource(res->parent,
+ &dummy_res->resource)) {
+ kfree(dummy_res);
+ goto no_mmap;
+ }
+ dummy_res->index = bar;
+ list_add(&dummy_res->res_next,
+ &vdev->dummy_resources_list);
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+ /*
+		 * We don't handle the case where the BAR is not page
+		 * aligned: we can't expect the BAR to be assigned to the
+		 * same offset within a page in the guest when it is
+		 * passed through, and userspace has no way to learn the
+		 * BAR's offset within its page, so it could not make
+		 * use of such a mapping anyway.
+ */
+no_mmap:
+ vdev->bar_mmap_supported[bar] = false;
+ }
+}
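For illustration (hypothetical numbers): with 4KiB pages, a 256-byte BAR at 0xfe000000 is page aligned but sub-page, so the dummy resource above claims 0xfe000100 through 0xfe000fff; a hot-added device's BAR can then no longer be assigned into the rest of that page, where it would be reachable through the same user mapping.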
+
+struct vfio_pci_group_info;
+static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
+static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ struct vfio_pci_group_info *groups);
+
+/*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+ * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
+ * If a device implements the former but not the latter, we would typically
+ * expect broken_intx_masking to be set and require an exclusive interrupt.
+ * However, since we do have control of the device's ability to assert INTx,
+ * we can instead pretend that the device does not implement INTx, virtualizing
+ * the pin register to report zero and maintaining DisINTx set on the host.
+ */
+static bool vfio_pci_nointx(struct pci_dev *pdev)
+{
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ switch (pdev->device) {
+ /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
+ case 0x1572:
+ case 0x1574:
+ case 0x1580 ... 0x1581:
+ case 0x1583 ... 0x158b:
+ case 0x37d0 ... 0x37d2:
+ /* X550 */
+ case 0x1563:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 pmcsr;
+
+ if (!pdev->pm_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+}
+
+/*
+ * pci_set_power_state() wrapper handling devices which perform a soft reset on
+ * D3->D0 transition. Save state prior to a D0/1/2->D3 transition, stash it
+ * on the vdev, and restore it when the device returns to D0. Saved
+ * separately from pci_saved_state for use
+ * by PM capability emulation and separately from pci_dev internal saved state
+ * to avoid it being overwritten and consumed around other resets.
+ */
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ bool needs_restore = false, needs_save = false;
+ int ret;
+
+ if (vdev->needs_pm_restore) {
+ if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
+ pci_save_state(pdev);
+ needs_save = true;
+ }
+
+ if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
+ needs_restore = true;
+ }
+
+ ret = pci_set_power_state(pdev, state);
+
+ if (!ret) {
+ /* D3 might be unsupported via quirk, skip unless in D3 */
+ if (needs_save && pdev->current_state >= PCI_D3hot) {
+ vdev->pm_save = pci_store_saved_state(pdev);
+ } else if (needs_restore) {
+ pci_load_and_free_saved_state(pdev, &vdev->pm_save);
+ pci_restore_state(pdev);
+ }
+ }
+
+ return ret;
+}
+
+int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+ u16 cmd;
+ u8 msix_pos;
+
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ /* Don't allow our initial saved state to include busmaster */
+ pci_clear_master(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ /* If reset fails because of the device lock, fail this path entirely */
+ ret = pci_try_reset_function(pdev);
+ if (ret == -EAGAIN) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ vdev->reset_works = !ret;
+ pci_save_state(pdev);
+ vdev->pci_saved_state = pci_store_saved_state(pdev);
+ if (!vdev->pci_saved_state)
+ pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
+
+ if (likely(!nointxmask)) {
+ if (vfio_pci_nointx(pdev)) {
+ pci_info(pdev, "Masking broken INTx support\n");
+ vdev->nointx = true;
+ pci_intx(pdev, 0);
+ } else
+ vdev->pci_2_3 = pci_intx_mask_supported(pdev);
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
+ cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = vfio_config_init(vdev);
+ if (ret) {
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ msix_pos = pdev->msix_cap;
+ if (msix_pos) {
+ u16 flags;
+ u32 table;
+
+ pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
+ pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
+
+ vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
+ vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
+ vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
+ } else
+ vdev->msix_bar = 0xFF;
+
+ if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
+ vdev->has_vga = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
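For illustration, a minimal sketch of how a driver built on this core might wire its .open_device callback around vfio_pci_core_enable() and vfio_pci_core_finish_enable() (exported below); the my_* name is a placeholder, and any vendor-specific setup would sit between the two calls:

static int my_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	/* vendor-specific setup (extra regions, migration, ...) goes here */

	vfio_pci_core_finish_enable(vdev);
	return 0;
}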
+
+void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_dummy_resource *dummy_res, *tmp;
+ struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
+ int i, bar;
+
+ /* For needs_reset */
+ lockdep_assert_held(&vdev->vdev.dev_set->lock);
+
+ /* Stop the device from further DMA */
+ pci_clear_master(pdev);
+
+ vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
+ VFIO_IRQ_SET_ACTION_TRIGGER,
+ vdev->irq_type, 0, 0, NULL);
+
+ /* Device closed, don't need mutex here */
+ list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
+ &vdev->ioeventfds_list, next) {
+ vfio_virqfd_disable(&ioeventfd->virqfd);
+ list_del(&ioeventfd->next);
+ kfree(ioeventfd);
+ }
+ vdev->ioeventfds_nr = 0;
+
+ vdev->virq_disabled = false;
+
+ for (i = 0; i < vdev->num_regions; i++)
+ vdev->region[i].ops->release(vdev, &vdev->region[i]);
+
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL; /* don't krealloc a freed pointer */
+
+ vfio_config_free(vdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar = i + PCI_STD_RESOURCES;
+ if (!vdev->barmap[bar])
+ continue;
+ pci_iounmap(pdev, vdev->barmap[bar]);
+ pci_release_selected_regions(pdev, 1 << bar);
+ vdev->barmap[bar] = NULL;
+ }
+
+ list_for_each_entry_safe(dummy_res, tmp,
+ &vdev->dummy_resources_list, res_next) {
+ list_del(&dummy_res->res_next);
+ release_resource(&dummy_res->resource);
+ kfree(dummy_res);
+ }
+
+ vdev->needs_reset = true;
+
+ /*
+ * If we have saved state, restore it. If we can reset the device,
+ * even better. Resetting with current state seems better than
+ * nothing, but saving and restoring current state without reset
+ * is just busy work.
+ */
+ if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
+ pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
+
+ if (!vdev->reset_works)
+ goto out;
+
+ pci_save_state(pdev);
+ }
+
+ /*
+ * Disable INTx and MSI, presumably to avoid spurious interrupts
+	 * during reset. Stolen from pci_reset_function().
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ /*
+ * Try to get the locks ourselves to prevent a deadlock. The
+ * success of this is dependent on being able to lock the device,
+ * which is not always possible.
+	 * We cannot use the "try" reset interface here, as it would
+	 * overwrite the configuration information we just restored.
+ */
+ if (vdev->reset_works && pci_dev_trylock(pdev)) {
+ if (!__pci_reset_function_locked(pdev))
+ vdev->needs_reset = false;
+ pci_dev_unlock(pdev);
+ }
+
+ pci_restore_state(pdev);
+out:
+ pci_disable_device(pdev);
+
+ if (!vfio_pci_dev_set_try_reset(vdev->vdev.dev_set) && !disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
+
+static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *physfn = pci_physfn(vdev->pdev);
+ struct vfio_device *pf_dev;
+
+ if (!vdev->pdev->is_virtfn)
+ return NULL;
+
+ pf_dev = vfio_device_get_from_dev(&physfn->dev);
+ if (!pf_dev)
+ return NULL;
+
+ if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
+ vfio_device_put(pf_dev);
+ return NULL;
+ }
+
+ return container_of(pf_dev, struct vfio_pci_core_device, vdev);
+}
+
+static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
+{
+ struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+
+ if (!pf_vdev)
+ return;
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ pf_vdev->vf_token->users += val;
+ WARN_ON(pf_vdev->vf_token->users < 0);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(&pf_vdev->vdev);
+}
+
+void vfio_pci_core_close_device(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ vfio_pci_vf_token_user_add(vdev, -1);
+ vfio_spapr_pci_eeh_release(vdev->pdev);
+ vfio_pci_core_disable(vdev);
+
+ mutex_lock(&vdev->igate);
+ if (vdev->err_trigger) {
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ }
+ if (vdev->req_trigger) {
+ eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
+
+void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
+{
+ vfio_pci_probe_mmaps(vdev);
+ vfio_spapr_pci_eeh_open(vdev->pdev);
+ vfio_pci_vf_token_user_add(vdev, 1);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
+
+static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
+{
+ if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
+ u8 pin;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+ vdev->nointx || vdev->pdev->is_virtfn)
+ return 0;
+
+ pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
+
+ return pin ? 1 : 0;
+ } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = vdev->pdev->msi_cap;
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSI_FLAGS, &flags);
+ return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
+ }
+ } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = vdev->pdev->msix_cap;
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSIX_FLAGS, &flags);
+
+ return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
+ }
+ } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
+ if (pci_is_pcie(vdev->pdev))
+ return 1;
+ } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+ return 1;
+ }
+
+ return 0;
+}
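For example, for MSI the capability's Multiple Message Capable field occupies bits 3:1 of the flags word, so (flags & PCI_MSI_FLAGS_QMASK) >> 1 yields the encoded value and 1 << that value the vector count: an MMC encoding of 3 reports 8 vectors.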
+
+static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+{
+ (*(int *)data)++;
+ return 0;
+}
+
+struct vfio_pci_fill_info {
+ int max;
+ int cur;
+ struct vfio_pci_dependent_device *devices;
+};
+
+static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_fill_info *fill = data;
+ struct iommu_group *iommu_group;
+
+ if (fill->cur == fill->max)
+ return -EAGAIN; /* Something changed, try again */
+
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
+
+ fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
+ fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
+ fill->devices[fill->cur].bus = pdev->bus->number;
+ fill->devices[fill->cur].devfn = pdev->devfn;
+ fill->cur++;
+ iommu_group_put(iommu_group);
+ return 0;
+}
+
+struct vfio_pci_group_info {
+ int count;
+ struct vfio_group **groups;
+};
+
+static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
+{
+ for (; pdev; pdev = pdev->bus->self)
+ if (pdev->bus == slot->bus)
+ return (pdev->slot == slot);
+ return false;
+}
+
+struct vfio_pci_walk_info {
+ int (*fn)(struct pci_dev *, void *data);
+ void *data;
+ struct pci_dev *pdev;
+ bool slot;
+ int ret;
+};
+
+static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_walk_info *walk = data;
+
+ if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
+ walk->ret = walk->fn(pdev, walk->data);
+
+ return walk->ret;
+}
+
+static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *,
+ void *data), void *data,
+ bool slot)
+{
+ struct vfio_pci_walk_info walk = {
+ .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
+ };
+
+ pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
+
+ return walk.ret;
+}
+
+static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_info_cap_header header = {
+ .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
+ .version = 1
+ };
+
+ return vfio_info_add_capability(caps, &header, sizeof(header));
+}
+
+int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_pci_region *region;
+
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+
+ vdev->num_regions++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_register_dev_region);
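For illustration, a sketch of a caller exposing a 4KiB driver-backed scratch region; my_type/my_subtype and the backing-buffer scheme are assumptions for the example, not part of this patch:

static ssize_t my_region_rw(struct vfio_pci_core_device *vdev,
			    char __user *buf, size_t count, loff_t *ppos,
			    bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	void *base = vdev->region[i].data;

	if (pos + count > vdev->region[i].size)
		return -EINVAL;

	if (iswrite ? copy_from_user(base + pos, buf, count) :
		      copy_to_user(buf, base + pos, count))
		return -EFAULT;

	*ppos += count;
	return count;
}

static void my_region_release(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_region *region)
{
	kfree(region->data);
}

static const struct vfio_pci_regops my_regops = {
	.rw	 = my_region_rw,
	.release = my_region_release,
};

	/* in the driver's enable path: */
	void *data = kzalloc(SZ_4K, GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	ret = vfio_pci_register_dev_region(vdev, my_type, my_subtype,
					   &my_regops, SZ_4K,
					   VFIO_REGION_INFO_FLAG_READ |
					   VFIO_REGION_INFO_FLAG_WRITE, data);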
+
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ unsigned long minsz;
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
+ int ret;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0;
+ }
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+
+ if (vdev->reset_works)
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+
+ info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
+ return ret;
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pdev->cfg_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ if (vdev->bar_mmap_supported[info.index]) {
+ info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ if (info.index == vdev->msix_bar) {
+ ret = msix_mmappable_cap(vdev, &caps);
+ if (ret)
+ return ret;
+ }
+ }
+
+ break;
+ case VFIO_PCI_ROM_REGION_INDEX:
+ {
+ void __iomem *io;
+ size_t size;
+ u16 cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+
+ /* Report the BAR size, not the ROM size */
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ /* Shadow ROMs appear as PCI option ROMs */
+ if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW)
+ info.size = 0x20000;
+ else
+ break;
+ }
+
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
+ info.size = 0;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ break;
+ default:
+ {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
+
+ if (info.index >=
+ VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ info.index = array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vdev->num_regions);
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
+
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(vdev,
+ &vdev->region[i], &caps);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
+ break;
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = vfio_pci_get_irq_count(vdev, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int max, ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ mutex_lock(&vdev->igate);
+
+ ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+
+ mutex_unlock(&vdev->igate);
+ kfree(data);
+
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_RESET) {
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz)
+ return -EINVAL;
+
+ hdr.flags = 0;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
+
+ WARN_ON(!fill.max); /* Should always be at least one */
+
+ /*
+		 * If there's enough space, fill it now; otherwise return
+		 * -ENOSPC along with the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
+
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
+ fill.devices = devices;
+
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_fill_devs,
+ &fill, slot);
+
+ /*
+ * If a device was removed between counting and filling,
+ * we may come up short of fill.max. If a device was
+ * added, we'll have a return of -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
+
+reset_info_exit:
+ if (copy_to_user((void __user *)arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user((void __user *)(arg + minsz), devices,
+ hdr.count * sizeof(*devices)))
+ ret = -EFAULT;
+ }
+
+ kfree(devices);
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct vfio_group **groups;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int group_idx, count = 0, ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /*
+ * We can't let userspace give us an arbitrarily large
+		 * buffer to copy, so bound the count by how many devices
+		 * could actually be affected. Note that groups can contain
+		 * multiple devices, so one group per device is the maximum.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
+
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
+
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
+ if (!group_fds || !groups) {
+ kfree(group_fds);
+ kfree(groups);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(group_fds, (void __user *)(arg + minsz),
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(groups);
+ return -EFAULT;
+ }
+
+ /*
+		 * For each group_fd, get the group through the vfio external
+		 * user interface and store it. This ensures the group is
+		 * held across the reset.
+ */
+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
+ struct vfio_group *group;
+ struct fd f = fdget(group_fds[group_idx]);
+ if (!f.file) {
+ ret = -EBADF;
+ break;
+ }
+
+ group = vfio_group_get_external_user(f.file);
+ fdput(f);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ break;
+ }
+
+ groups[group_idx] = group;
+ }
+
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
+
+ info.count = hdr.count;
+ info.groups = groups;
+
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+
+hot_reset_release:
+ for (group_idx--; group_idx >= 0; group_idx--)
+ vfio_group_put_external_user(groups[group_idx]);
+
+ kfree(groups);
+ return ret;
+ } else if (cmd == VFIO_DEVICE_IOEVENTFD) {
+ struct vfio_device_ioeventfd ioeventfd;
+ int count;
+
+ minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+
+ if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (ioeventfd.argsz < minsz)
+ return -EINVAL;
+
+ if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+ return -EINVAL;
+
+ count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+
+ if (hweight8(count) != 1 || ioeventfd.fd < -1)
+ return -EINVAL;
+
+ return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
+ ioeventfd.data, count, ioeventfd.fd);
+ } else if (cmd == VFIO_DEVICE_FEATURE) {
+ struct vfio_device_feature feature;
+ uuid_t uuid;
+
+ minsz = offsetofend(struct vfio_device_feature, flags);
+
+ if (copy_from_user(&feature, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (feature.argsz < minsz)
+ return -EINVAL;
+
+ /* Check unknown flags */
+ if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
+ VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_PROBE))
+ return -EINVAL;
+
+ /* GET & SET are mutually exclusive except with PROBE */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_GET))
+ return -EINVAL;
+
+ switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
+ if (!vdev->vf_token)
+ return -ENOTTY;
+
+ /*
+ * We do not support GET of the VF Token UUID as this
+ * could expose the token of the previous device user.
+ */
+ if (feature.flags & VFIO_DEVICE_FEATURE_GET)
+ return -EINVAL;
+
+ if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
+ return 0;
+
+ /* Don't SET unless told to do so */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
+ return -EINVAL;
+
+ if (feature.argsz < minsz + sizeof(uuid))
+ return -EINVAL;
+
+ if (copy_from_user(&uuid, (void __user *)(arg + minsz),
+ sizeof(uuid)))
+ return -EFAULT;
+
+ mutex_lock(&vdev->vf_token->lock);
+ uuid_copy(&vdev->vf_token->uuid, &uuid);
+ mutex_unlock(&vdev->vf_token->lock);
+
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+ }
+
+ return -ENOTTY;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
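From userspace, the region branch above is driven through the standard VFIO UAPI. A sketch, assuming device_fd was obtained via VFIO_GROUP_GET_DEVICE_FD:

	#include <linux/vfio.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
		perror("VFIO_DEVICE_GET_REGION_INFO");
	/* info.offset, info.size and info.flags now describe BAR0 */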
+
+static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ if (iswrite)
+ return -EINVAL;
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_VGA_REGION_INDEX:
+ return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ default:
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vdev->region[index].ops->rw(vdev, buf,
+ count, ppos, iswrite);
+ }
+
+ return -EINVAL;
+}
+
+ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (!count)
+ return 0;
+
+ return vfio_pci_rw(vdev, buf, count, ppos, false);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_read);
+
+ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (!count)
+ return 0;
+
+ return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_write);
+
+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_lock for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_lock => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_lock and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_lock must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_lock to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_lock without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!mmap_read_trylock(mm)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mmap_read_lock(mm);
+ }
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
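These two helpers are intended to bracket host-side accesses that depend on memory decode being enabled, as the ROM probing in vfio_pci_core_ioctl() above does:

	u16 cmd = vfio_pci_memory_lock_and_enable(vdev);
	/* ... access that relies on PCI_COMMAND_MEMORY being set ... */
	vfio_pci_memory_unlock_and_restore(vdev, cmd);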
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access; our
+ * vma_list therefore only tracks mappings accessed since the last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ goto up_out;
+ }
+
+ /*
+ * We populate the whole vma on fault, so we need to test whether
+ * the vma has already been mapped, such as for concurrent faults
+ * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
+ * we ask it to fill the same range again.
+ */
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma)
+ goto up_out;
+ }
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ ret = VM_FAULT_SIGBUS;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ }
+
+up_out:
+ up_read(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
+int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned int index;
+ u64 phys_len, req_len, pgoff, req_start;
+ int ret;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ int regnum = index - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_region *region = vdev->region + regnum;
+
+ if (region->ops && region->ops->mmap &&
+ (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return region->ops->mmap(vdev, region, vma);
+ return -EINVAL;
+ }
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+ if (!vdev->bar_mmap_supported[index])
+ return -EINVAL;
+
+ phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (req_start + req_len > phys_len)
+ return -EINVAL;
+
+ /*
+	 * Even though we don't make use of the barmap for the mmap,
+	 * we still need to request the region, and the barmap tracks that.
+ */
+ if (!vdev->barmap[index]) {
+ ret = pci_request_selected_regions(pdev,
+ 1 << index, "vfio-pci");
+ if (ret)
+ return ret;
+
+ vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
+ }
+
+ vma->vm_private_data = vdev;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+
+ /*
+	 * See remap_pfn_range(), called from vfio_pci_mmap_fault(); we can't
+	 * change vm_flags within the fault handler, so set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
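Continuing the userspace sketch from the ioctl above: when GET_REGION_INFO reports VFIO_REGION_INFO_FLAG_MMAP, the returned offset already encodes the region index (index << VFIO_PCI_OFFSET_SHIFT) and can be passed straight to mmap():

	#include <sys/mman.h>

	void *bar = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, device_fd, info.offset);
	if (bar == MAP_FAILED)
		perror("mmap");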
+
+void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+
+ mutex_lock(&vdev->igate);
+
+ if (vdev->req_trigger) {
+ if (!(count % 10))
+ pci_notice_ratelimited(pdev,
+ "Relaying device request to user (#%u)\n",
+ count);
+ eventfd_signal(vdev->req_trigger, 1);
+ } else if (count == 0) {
+ pci_warn(pdev,
+ "No device request channel registered, blocked until released by user\n");
+ }
+
+ mutex_unlock(&vdev->igate);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_request);
+
+static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
+ bool vf_token, uuid_t *uuid)
+{
+ /*
+ * There's always some degree of trust or collaboration between SR-IOV
+ * PF and VFs, even if just that the PF hosts the SR-IOV capability and
+ * can disrupt VFs with a reset, but often the PF has more explicit
+ * access to deny service to the VF or access data passed through the
+ * VF. We therefore require an opt-in via a shared VF token (UUID) to
+	 * represent this trust. This prevents a VF driver from wrongly
+	 * assuming the PF driver is a trusted, in-kernel driver, and
+	 * likewise prevents the PF driver from being replaced by a rogue
+	 * driver unknown to in-use VF drivers.
+ *
+ * Therefore when presented with a VF, if the PF is a vfio device and
+ * it is bound to the vfio-pci driver, the user needs to provide a VF
+ * token to access the device, in the form of appending a vf_token to
+ * the device name, for example:
+ *
+ * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
+ *
+ * When presented with a PF which has VFs in use, the user must also
+ * provide the current VF token to prove collaboration with existing
+ * VF users. If VFs are not in use, the VF token provided for the PF
+ * device will act to set the VF token.
+ *
+ * If the VF token is provided but unused, an error is generated.
+ */
+ if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
+ return 0; /* No VF token provided or required */
+
+ if (vdev->pdev->is_virtfn) {
+ struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+ bool match;
+
+ if (!pf_vdev) {
+ if (!vf_token)
+ return 0; /* PF is not vfio-pci, no VF token */
+
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, PF not bound to vfio-pci\n");
+ return -EINVAL;
+ }
+
+ if (!vf_token) {
+ vfio_device_put(&pf_vdev->vdev);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(&pf_vdev->vdev);
+
+ if (!match) {
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vdev->vf_token) {
+ mutex_lock(&vdev->vf_token->lock);
+ if (vdev->vf_token->users) {
+ if (!vf_token) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vf_token) {
+ uuid_copy(&vdev->vf_token->uuid, uuid);
+ }
+
+ mutex_unlock(&vdev->vf_token->lock);
+ } else if (vf_token) {
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, not a PF or VF\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define VF_TOKEN_ARG "vf_token="
+
+int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ bool vf_token = false;
+ uuid_t uuid;
+ int ret;
+
+ if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
+ return 0; /* No match */
+
+ if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
+ buf += strlen(pci_name(vdev->pdev));
+
+ if (*buf != ' ')
+ return 0; /* No match: non-whitespace after name */
+
+ while (*buf) {
+ if (*buf == ' ') {
+ buf++;
+ continue;
+ }
+
+ if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
+ strlen(VF_TOKEN_ARG))) {
+ buf += strlen(VF_TOKEN_ARG);
+
+ if (strlen(buf) < UUID_STRING_LEN)
+ return -EINVAL;
+
+ ret = uuid_parse(buf, &uuid);
+ if (ret)
+ return ret;
+
+ vf_token = true;
+ buf += UUID_STRING_LEN;
+ } else {
+ /* Unknown/duplicate option */
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+ if (ret)
+ return ret;
+
+ return 1; /* Match */
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_match);
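Userspace exercises this match logic through VFIO_GROUP_GET_DEVICE_FD, appending the token to the device name exactly as described in vfio_pci_validate_vf_token() above; a sketch using the example UUID from that comment:

	int device_fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD,
			      "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3");
	if (device_fd < 0)
		perror("VFIO_GROUP_GET_DEVICE_FD");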
+
+static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_pci_core_device *vdev = container_of(nb,
+ struct vfio_pci_core_device, nb);
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *physfn = pci_physfn(pdev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
+ pci_name(pdev));
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ vdev->vdev.ops->name);
+ } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ struct pci_driver *drv = pci_dev_driver(pdev);
+
+ if (drv && drv != pci_dev_driver(vdev->pdev))
+ pci_warn(vdev->pdev,
+ "VF %s bound to driver %s while PF bound to driver %s\n",
+ pci_name(pdev), drv->name,
+ pci_dev_driver(vdev->pdev)->name);
+ }
+
+ return 0;
+}
+
+static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+
+ if (!pdev->is_physfn)
+ return 0;
+
+ vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+ if (!vdev->vf_token)
+ return -ENOMEM;
+
+ mutex_init(&vdev->vf_token->lock);
+ uuid_gen(&vdev->vf_token->uuid);
+
+ vdev->nb.notifier_call = vfio_pci_bus_notifier;
+ ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+ if (ret) {
+ kfree(vdev->vf_token);
+ return ret;
+ }
+ return 0;
+}
+
+static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
+{
+ if (!vdev->vf_token)
+ return;
+
+ bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+ WARN_ON(vdev->vf_token->users);
+ mutex_destroy(&vdev->vf_token->lock);
+ kfree(vdev->vf_token);
+}
+
+static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+
+ if (!vfio_pci_is_vga(pdev))
+ return 0;
+
+ ret = vga_client_register(pdev, vfio_pci_set_decode);
+ if (ret)
+ return ret;
+ vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
+ return 0;
+}
+
+static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+
+ if (!vfio_pci_is_vga(pdev))
+ return;
+ vga_client_unregister(pdev);
+ vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_LEGACY_MEM);
+}
+
+void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
+ struct pci_dev *pdev,
+ const struct vfio_device_ops *vfio_pci_ops)
+{
+ vfio_init_group_dev(&vdev->vdev, &pdev->dev, vfio_pci_ops);
+ vdev->pdev = pdev;
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ mutex_init(&vdev->igate);
+ spin_lock_init(&vdev->irqlock);
+ mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
+
+void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev)
+{
+ mutex_destroy(&vdev->igate);
+ mutex_destroy(&vdev->ioeventfds_lock);
+ mutex_destroy(&vdev->vma_lock);
+ vfio_uninit_group_dev(&vdev->vdev);
+ kfree(vdev->region);
+ kfree(vdev->pm_save);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
+
+int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct iommu_group *group;
+ int ret;
+
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return -EINVAL;
+
+ /*
+	 * Prevent binding to PFs with VFs enabled; the VFs might be in use
+ * by the host or other users. We cannot capture the VFs if they
+ * already exist, nor can we track VF users. Disabling SR-IOV here
+ * would initiate removing the VFs, which would unbind the driver,
+ * which is prone to blocking if that VF is also in use by vfio-pci.
+ * Just reject these PFs and let the user sort it out.
+ */
+ if (pci_num_vf(pdev)) {
+ pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
+ return -EBUSY;
+ }
+
+ group = vfio_iommu_group_get(&pdev->dev);
+ if (!group)
+ return -EINVAL;
+
+ if (pci_is_root_bus(pdev->bus)) {
+ ret = vfio_assign_device_set(&vdev->vdev, vdev);
+ } else if (!pci_probe_reset_slot(pdev->slot)) {
+ ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
+ } else {
+ /*
+ * If there is no slot reset support for this device, the whole
+ * bus needs to be grouped together to support bus-wide resets.
+ */
+ ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
+ }
+
+ if (ret)
+ goto out_group_put;
+ ret = vfio_pci_vf_init(vdev);
+ if (ret)
+ goto out_group_put;
+ ret = vfio_pci_vga_init(vdev);
+ if (ret)
+ goto out_vf;
+
+ vfio_pci_probe_power_state(vdev);
+
+ if (!disable_idle_d3) {
+ /*
+ * pci-core sets the device power state to an unknown value at
+ * bootup and after being removed from a driver. The only
+ * transition it allows from this unknown state is to D0, which
+ * typically happens when a driver calls pci_enable_device().
+ * We're not ready to enable the device yet, but we do want to
+ * be able to get to D3. Therefore first do a D0 transition
+ * before going to D3.
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
+ }
+
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_power;
+ return 0;
+
+out_power:
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D0);
+out_vf:
+ vfio_pci_vf_uninit(vdev);
+out_group_put:
+ vfio_iommu_group_put(group, &pdev->dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
+
+void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+
+ pci_disable_sriov(pdev);
+
+ vfio_unregister_group_dev(&vdev->vdev);
+
+ vfio_pci_vf_uninit(vdev);
+ vfio_pci_vga_uninit(vdev);
+
+ vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D0);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
+
+static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct vfio_pci_core_device *vdev;
+ struct vfio_device *device;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (device == NULL)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+ mutex_lock(&vdev->igate);
+
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+
+ mutex_unlock(&vdev->igate);
+
+ vfio_device_put(device);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+{
+ struct vfio_device *device;
+ int ret = 0;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ if (nr_virtfn == 0)
+ pci_disable_sriov(pdev);
+ else
+ ret = pci_enable_sriov(pdev, nr_virtfn);
+
+ vfio_device_put(device);
+
+ return ret < 0 ? ret : nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
+
+const struct pci_error_handlers vfio_pci_core_err_handlers = {
+ .error_detected = vfio_pci_aer_err_detected,
+};
+EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
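
Taken together, the exports above form the driver-facing surface of the new
vfio-pci core library. Below is a minimal sketch of how a hypothetical vendor
driver might wire them up; the driver name, the probe/remove callbacks and the
my_vfio_pci_ops table are assumptions for illustration, not part of this patch:

	static int my_vfio_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		struct vfio_pci_core_device *vdev;
		int ret;

		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev)
			return -ENOMEM;

		/* Initialize library state, then expose the device to userspace */
		vfio_pci_core_init_device(vdev, pdev, &my_vfio_pci_ops);
		ret = vfio_pci_core_register_device(vdev);
		if (ret) {
			vfio_pci_core_uninit_device(vdev);
			kfree(vdev);
			return ret;
		}
		pci_set_drvdata(pdev, vdev);
		return 0;
	}

	static void my_vfio_pci_remove(struct pci_dev *pdev)
	{
		struct vfio_pci_core_device *vdev = pci_get_drvdata(pdev);

		vfio_pci_core_unregister_device(vdev);
		vfio_pci_core_uninit_device(vdev);
		kfree(vdev);
	}

	static struct pci_driver my_vfio_pci_driver = {
		.name			= "my-vfio-pci",
		.probe			= my_vfio_pci_probe,
		.remove			= my_vfio_pci_remove,
		.sriov_configure	= vfio_pci_core_sriov_configure,
		.err_handler		= &vfio_pci_core_err_handlers,
	};
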
+
+static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_group_info *groups)
+{
+ unsigned int i;
+
+ for (i = 0; i < groups->count; i++)
+ if (groups->groups[i] == vdev->vdev.group)
+ return true;
+ return false;
+}
+
+static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
+{
+ struct vfio_device_set *dev_set = data;
+ struct vfio_device *cur;
+
+ list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+ if (cur->dev == &pdev->dev)
+ return 0;
+ return -EBUSY;
+}
+
+/*
+ * vfio-core considers a group to be viable and will create a vfio_device even
+ * if some devices are bound to drivers like pci-stub or pcieport. Here we
+ * require all PCI devices to be inside our dev_set since that ensures they stay
+ * put and that every driver controlling the device can co-ordinate with the
+ * device reset.
+ *
+ * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
+ * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
+ */
+static struct pci_dev *
+vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
+{
+ struct pci_dev *pdev;
+
+ lockdep_assert_held(&dev_set->lock);
+
+ /*
+ * By definition all PCI devices in the dev_set share the same PCI
+ * reset, so any pci_dev will have the same outcomes for
+ * pci_probe_reset_*() and pci_reset_bus().
+ */
+ pdev = list_first_entry(&dev_set->device_list,
+ struct vfio_pci_core_device,
+ vdev.dev_set_list)->pdev;
+
+	/* Bail if neither slot nor bus reset is supported (pci_reset_bus() needs one) */
+ if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
+ return NULL;
+
+ if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
+ dev_set,
+ !pci_probe_reset_slot(pdev->slot)))
+ return NULL;
+ return pdev;
+}
+
+/*
+ * We need to get memory_lock for each device, but devices can share mmap_lock,
+ * therefore we need to zap and hold the vma_lock for each device, and only then
+ * get each memory_lock.
+ */
+static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ struct vfio_pci_group_info *groups)
+{
+ struct vfio_pci_core_device *cur_mem;
+ struct vfio_pci_core_device *cur_vma;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *pdev;
+ bool is_mem = true;
+ int ret;
+
+ mutex_lock(&dev_set->lock);
+ cur_mem = list_first_entry(&dev_set->device_list,
+ struct vfio_pci_core_device,
+ vdev.dev_set_list);
+
+ pdev = vfio_pci_dev_set_resettable(dev_set);
+ if (!pdev) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ /*
+ * Test whether all the affected devices are contained by the
+ * set of groups provided by the user.
+ */
+ if (!vfio_dev_in_groups(cur_vma, groups)) {
+ ret = -EINVAL;
+ goto err_undo;
+ }
+
+ /*
+		 * Locking multiple devices is prone to deadlock; run away and
+		 * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+ ret = -EBUSY;
+ goto err_undo;
+ }
+ }
+ cur_vma = NULL;
+
+ list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
+ if (!down_write_trylock(&cur_mem->memory_lock)) {
+ ret = -EBUSY;
+ goto err_undo;
+ }
+ mutex_unlock(&cur_mem->vma_lock);
+ }
+ cur_mem = NULL;
+
+ ret = pci_reset_bus(pdev);
+
+err_undo:
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ if (cur == cur_mem)
+ is_mem = false;
+ if (cur == cur_vma)
+ break;
+ if (is_mem)
+ up_write(&cur->memory_lock);
+ else
+ mutex_unlock(&cur->vma_lock);
+ }
+err_unlock:
+ mutex_unlock(&dev_set->lock);
+ return ret;
+}
+
+static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ bool needs_reset = false;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ /* No VFIO device in the set can have an open device FD */
+ if (cur->vdev.open_count)
+ return false;
+ needs_reset |= cur->needs_reset;
+ }
+ return needs_reset;
+}
+
+/*
+ * If a bus or slot reset is available for the provided dev_set and:
+ * - All of the devices affected by that bus or slot reset are unused
+ * - At least one of the affected devices is marked dirty via
+ * needs_reset (such as by lack of FLR support)
+ * Then attempt to perform that bus or slot reset.
+ * Returns true if the dev_set was reset.
+ */
+static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *pdev;
+ int ret;
+
+ if (!vfio_pci_dev_set_needs_reset(dev_set))
+ return false;
+
+ pdev = vfio_pci_dev_set_resettable(dev_set);
+ if (!pdev)
+ return false;
+
+ ret = pci_reset_bus(pdev);
+ if (ret)
+ return false;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ cur->needs_reset = false;
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(cur, PCI_D3hot);
+ }
+ return true;
+}
+
+void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
+ bool is_disable_idle_d3)
+{
+ nointxmask = is_nointxmask;
+ disable_vga = is_disable_vga;
+ disable_idle_d3 = is_disable_idle_d3;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);
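
Since the module parameters now live in the core, a thin binding driver is
expected to forward its own parameters through this hook. A sketch of that
wiring, reusing the hypothetical my_vfio_pci_driver from above (the parameter
plumbing shown here is an assumption):

	static bool nointxmask;
	module_param(nointxmask, bool, 0444);

	static bool disable_vga;
	module_param(disable_vga, bool, 0444);

	static bool disable_idle_d3;
	module_param(disable_idle_d3, bool, 0444);

	static int __init my_vfio_pci_init(void)
	{
		/* Push the user's choices into the shared core before probing */
		vfio_pci_core_set_params(nointxmask, disable_vga, disable_idle_d3);
		return pci_register_driver(&my_vfio_pci_driver);
	}
	module_init(my_vfio_pci_init);
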
+
+static void vfio_pci_core_cleanup(void)
+{
+ vfio_pci_uninit_perm_bits();
+}
+
+static int __init vfio_pci_core_init(void)
+{
+ /* Allocate shared config space permission data used by all devices */
+ return vfio_pci_init_perm_bits();
+}
+
+module_init(vfio_pci_core_init);
+module_exit(vfio_pci_core_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 228df565e9bc..7ca4109bba48 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -15,7 +15,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define OPREGION_SIZE (8 * 1024)
@@ -25,8 +25,9 @@
#define OPREGION_RVDS 0x3c2
#define OPREGION_VERSION 0x16
-static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos,
+ bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
void *base = vdev->region[i].data;
@@ -45,7 +46,7 @@ static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
return count;
}
-static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
+static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region)
{
memunmap(region->data);
@@ -56,7 +57,7 @@ static const struct vfio_pci_regops vfio_pci_igd_regops = {
.release = vfio_pci_igd_release,
};
-static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
+static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
u32 addr, size;
@@ -160,9 +161,9 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
return ret;
}
-static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
- char __user *buf, size_t count, loff_t *ppos,
- bool iswrite)
+static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos,
+ bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
struct pci_dev *pdev = vdev->region[i].data;
@@ -253,7 +254,7 @@ static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
return count;
}
-static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
+static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region)
{
struct pci_dev *pdev = region->data;
@@ -266,7 +267,7 @@ static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
.release = vfio_pci_igd_cfg_release,
};
-static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
+static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *host_bridge, *lpc_bridge;
int ret;
@@ -314,7 +315,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
return 0;
}
-int vfio_pci_igd_init(struct vfio_pci_device *vdev)
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
int ret;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 869dce5f134d..6069a11fb51a 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -20,20 +20,20 @@
#include <linux/wait.h>
#include <linux/slab.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
/*
* INTx
*/
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
- struct vfio_pci_device *vdev = opaque;
+ struct vfio_pci_core_device *vdev = opaque;
if (likely(is_intx(vdev) && !vdev->virq_disabled))
eventfd_signal(vdev->ctx[0].trigger, 1);
}
-void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
+void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
@@ -73,7 +73,7 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
*/
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
- struct vfio_pci_device *vdev = opaque;
+ struct vfio_pci_core_device *vdev = opaque;
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
int ret = 0;
@@ -107,7 +107,7 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
return ret;
}
-void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
vfio_send_intx_eventfd(vdev, NULL);
@@ -115,7 +115,7 @@ void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
- struct vfio_pci_device *vdev = dev_id;
+ struct vfio_pci_core_device *vdev = dev_id;
unsigned long flags;
int ret = IRQ_NONE;
@@ -139,7 +139,7 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
return ret;
}
-static int vfio_intx_enable(struct vfio_pci_device *vdev)
+static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
if (!is_irq_none(vdev))
return -EINVAL;
@@ -168,7 +168,7 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
return 0;
}
-static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
+static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long irqflags = IRQF_SHARED;
@@ -223,7 +223,7 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
return 0;
}
-static void vfio_intx_disable(struct vfio_pci_device *vdev)
+static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
vfio_virqfd_disable(&vdev->ctx[0].unmask);
vfio_virqfd_disable(&vdev->ctx[0].mask);
@@ -244,7 +244,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
+static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
@@ -285,7 +285,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
return 0;
}
-static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
int vector, int fd, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
@@ -364,7 +364,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
return 0;
}
-static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
+static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
unsigned count, int32_t *fds, bool msix)
{
int i, j, ret = 0;
@@ -385,7 +385,7 @@ static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
return ret;
}
-static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
+static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
@@ -417,7 +417,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
/*
* IOCTL support
*/
-static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
+static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -444,7 +444,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
return 0;
}
-static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
+static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -464,7 +464,7 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
return 0;
}
-static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
+static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -507,7 +507,7 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
return 0;
}
-static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
+static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -613,7 +613,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
return -EINVAL;
}
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -624,7 +624,7 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
count, flags, data);
}
-static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
@@ -635,11 +635,11 @@ static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
count, flags, data);
}
-int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
unsigned index, unsigned start, unsigned count,
void *data)
{
- int (*func)(struct vfio_pci_device *vdev, unsigned index,
+ int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
unsigned start, unsigned count, uint32_t flags,
void *data) = NULL;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
deleted file mode 100644
index 5a36272cecbf..000000000000
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
- * Author: Alex Williamson <alex.williamson@redhat.com>
- *
- * Derived from original vfio:
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- * Author: Tom Lyon, pugs@cisco.com
- */
-
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/irqbypass.h>
-#include <linux/types.h>
-#include <linux/uuid.h>
-#include <linux/notifier.h>
-
-#ifndef VFIO_PCI_PRIVATE_H
-#define VFIO_PCI_PRIVATE_H
-
-#define VFIO_PCI_OFFSET_SHIFT 40
-
-#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
-#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
-#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
-
-/* Special capability IDs predefined access */
-#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
-#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
-
-/* Cap maximum number of ioeventfds per device (arbitrary) */
-#define VFIO_PCI_IOEVENTFD_MAX 1000
-
-struct vfio_pci_ioeventfd {
- struct list_head next;
- struct vfio_pci_device *vdev;
- struct virqfd *virqfd;
- void __iomem *addr;
- uint64_t data;
- loff_t pos;
- int bar;
- int count;
- bool test_mem;
-};
-
-struct vfio_pci_irq_ctx {
- struct eventfd_ctx *trigger;
- struct virqfd *unmask;
- struct virqfd *mask;
- char *name;
- bool masked;
- struct irq_bypass_producer producer;
-};
-
-struct vfio_pci_device;
-struct vfio_pci_region;
-
-struct vfio_pci_regops {
- size_t (*rw)(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
- void (*release)(struct vfio_pci_device *vdev,
- struct vfio_pci_region *region);
- int (*mmap)(struct vfio_pci_device *vdev,
- struct vfio_pci_region *region,
- struct vm_area_struct *vma);
- int (*add_capability)(struct vfio_pci_device *vdev,
- struct vfio_pci_region *region,
- struct vfio_info_cap *caps);
-};
-
-struct vfio_pci_region {
- u32 type;
- u32 subtype;
- const struct vfio_pci_regops *ops;
- void *data;
- size_t size;
- u32 flags;
-};
-
-struct vfio_pci_dummy_resource {
- struct resource resource;
- int index;
- struct list_head res_next;
-};
-
-struct vfio_pci_reflck {
- struct kref kref;
- struct mutex lock;
-};
-
-struct vfio_pci_vf_token {
- struct mutex lock;
- uuid_t uuid;
- int users;
-};
-
-struct vfio_pci_mmap_vma {
- struct vm_area_struct *vma;
- struct list_head vma_next;
-};
-
-struct vfio_pci_device {
- struct vfio_device vdev;
- struct pci_dev *pdev;
- void __iomem *barmap[PCI_STD_NUM_BARS];
- bool bar_mmap_supported[PCI_STD_NUM_BARS];
- u8 *pci_config_map;
- u8 *vconfig;
- struct perm_bits *msi_perm;
- spinlock_t irqlock;
- struct mutex igate;
- struct vfio_pci_irq_ctx *ctx;
- int num_ctx;
- int irq_type;
- int num_regions;
- struct vfio_pci_region *region;
- u8 msi_qmax;
- u8 msix_bar;
- u16 msix_size;
- u32 msix_offset;
- u32 rbar[7];
- bool pci_2_3;
- bool virq_disabled;
- bool reset_works;
- bool extended_caps;
- bool bardirty;
- bool has_vga;
- bool needs_reset;
- bool nointx;
- bool needs_pm_restore;
- struct pci_saved_state *pci_saved_state;
- struct pci_saved_state *pm_save;
- struct vfio_pci_reflck *reflck;
- int refcnt;
- int ioeventfds_nr;
- struct eventfd_ctx *err_trigger;
- struct eventfd_ctx *req_trigger;
- struct list_head dummy_resources_list;
- struct mutex ioeventfds_lock;
- struct list_head ioeventfds_list;
- struct vfio_pci_vf_token *vf_token;
- struct notifier_block nb;
- struct mutex vma_lock;
- struct list_head vma_list;
- struct rw_semaphore memory_lock;
-};
-
-#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
-#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX)
-#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX)
-#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
-#define irq_is(vdev, type) (vdev->irq_type == type)
-
-extern void vfio_pci_intx_mask(struct vfio_pci_device *vdev);
-extern void vfio_pci_intx_unmask(struct vfio_pci_device *vdev);
-
-extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count, void *data);
-
-extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-
-extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
-
-extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
-
-extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
- uint64_t data, int count, int fd);
-
-extern int vfio_pci_init_perm_bits(void);
-extern void vfio_pci_uninit_perm_bits(void);
-
-extern int vfio_config_init(struct vfio_pci_device *vdev);
-extern void vfio_config_free(struct vfio_pci_device *vdev);
-
-extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data);
-
-extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
- pci_power_t state);
-
-extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
-extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
- *vdev);
-extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
-extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
- u16 cmd);
-
-#ifdef CONFIG_VFIO_PCI_IGD
-extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
-#else
-static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
-{
- return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_S390
-extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps);
-#else
-static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps)
-{
- return -ENODEV;
-}
-#endif
-
-#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a0b5fc8e46f4..57d3b2cbbd8e 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,7 +17,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64 ioread64
@@ -38,7 +38,7 @@
#define vfio_iowrite8 iowrite8
#define VFIO_IOWRITE(size) \
-static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
+static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size val, void __iomem *io) \
{ \
if (test_mem) { \
@@ -65,7 +65,7 @@ VFIO_IOWRITE(64)
#endif
#define VFIO_IOREAD(size) \
-static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
+static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size *val, void __iomem *io) \
{ \
if (test_mem) { \
@@ -94,7 +94,7 @@ VFIO_IOREAD(32)
* reads with -1. This is intended for handling MSI-X vector tables and
* leftover space for ROM BARs.
*/
-static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
+static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
size_t x_end, bool iswrite)
@@ -200,7 +200,7 @@ static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
return done;
}
-static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
+static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
@@ -224,7 +224,7 @@ static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
return 0;
}
-ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
@@ -288,7 +288,7 @@ out:
return done;
}
-ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
int ret;
@@ -384,7 +384,7 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
{
struct vfio_pci_ioeventfd *ioeventfd = opaque;
- struct vfio_pci_device *vdev = ioeventfd->vdev;
+ struct vfio_pci_core_device *vdev = ioeventfd->vdev;
if (ioeventfd->test_mem) {
if (!down_read_trylock(&vdev->memory_lock))
@@ -410,7 +410,7 @@ static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}
-long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
+long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
uint64_t data, int count, int fd)
{
struct pci_dev *pdev = vdev->pdev;
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index 7b011b62c766..ea4c0d2b0663 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -1,15 +1,10 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO ZPCI devices support
*
* Copyright (C) IBM Corp. 2020. All rights reserved.
* Author(s): Pierre Morel <pmorel@linux.ibm.com>
* Matthew Rosato <mjrosato@linux.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/io.h>
#include <linux/pci.h>
@@ -19,7 +14,7 @@
#include <asm/pci_clp.h>
#include <asm/pci_io.h>
-#include "vfio_pci_private.h"
+#include <linux/vfio_pci_core.h>
/*
* Add the Base PCI Function information to the device info region.
@@ -114,7 +109,7 @@ static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
/*
* Add all supported capabilities to the VFIO_DEVICE_GET_INFO capability chain.
*/
-int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
struct vfio_info_cap *caps)
{
struct zpci_dev *zdev = to_zpci(vdev->pdev);
diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig
index ab341108a0be..331a5920f5ab 100644
--- a/drivers/vfio/platform/Kconfig
+++ b/drivers/vfio/platform/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PLATFORM
tristate "VFIO support for platform devices"
- depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST)
+ depends on ARM || ARM64 || COMPILE_TEST
select VFIO_VIRQFD
help
Support for platform devices with VFIO. This is required to make
@@ -10,9 +10,10 @@ config VFIO_PLATFORM
If you don't know what to do here, say N.
+if VFIO_PLATFORM
config VFIO_AMBA
tristate "VFIO support for AMBA devices"
- depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST)
+ depends on ARM_AMBA || COMPILE_TEST
help
Support for ARM AMBA devices with VFIO. This is required to make
use of ARM AMBA devices present on the system using the VFIO
@@ -21,3 +22,4 @@ config VFIO_AMBA
If you don't know what to do here, say N.
source "drivers/vfio/platform/reset/Kconfig"
+endif
diff --git a/drivers/vfio/platform/reset/Kconfig b/drivers/vfio/platform/reset/Kconfig
index 1edbe9ee7356..12f5f3d80387 100644
--- a/drivers/vfio/platform/reset/Kconfig
+++ b/drivers/vfio/platform/reset/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PLATFORM_CALXEDAXGMAC_RESET
tristate "VFIO support for calxeda xgmac reset"
- depends on VFIO_PLATFORM
help
Enables the VFIO platform driver to handle reset for Calxeda xgmac
@@ -9,7 +8,6 @@ config VFIO_PLATFORM_CALXEDAXGMAC_RESET
config VFIO_PLATFORM_AMDXGBE_RESET
tristate "VFIO support for AMD XGBE reset"
- depends on VFIO_PLATFORM
help
Enables the VFIO platform driver to handle reset for AMD XGBE
@@ -17,7 +15,7 @@ config VFIO_PLATFORM_AMDXGBE_RESET
config VFIO_PLATFORM_BCMFLEXRM_RESET
tristate "VFIO support for Broadcom FlexRM reset"
- depends on VFIO_PLATFORM && (ARCH_BCM_IPROC || COMPILE_TEST)
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
default ARCH_BCM_IPROC
help
Enables the VFIO platform driver to handle reset for Broadcom FlexRM
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index 96064ef8f629..1131ebe4837d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 703164df7637..6af7ce7d619c 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -218,65 +218,52 @@ static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
return -EINVAL;
}
-static void vfio_platform_release(struct vfio_device *core_vdev)
+static void vfio_platform_close_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
+ const char *extra_dbg = NULL;
+ int ret;
- mutex_lock(&driver_lock);
-
- if (!(--vdev->refcnt)) {
- const char *extra_dbg = NULL;
- int ret;
-
- ret = vfio_platform_call_reset(vdev, &extra_dbg);
- if (ret && vdev->reset_required) {
- dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n",
- ret, extra_dbg ? extra_dbg : "");
- WARN_ON(1);
- }
- pm_runtime_put(vdev->device);
- vfio_platform_regions_cleanup(vdev);
- vfio_platform_irq_cleanup(vdev);
+ ret = vfio_platform_call_reset(vdev, &extra_dbg);
+ if (WARN_ON(ret && vdev->reset_required)) {
+		dev_warn(vdev->device,
+			 "reset driver is required and reset call failed in release (%d) %s\n",
+			 ret, extra_dbg ? extra_dbg : "");
}
-
- mutex_unlock(&driver_lock);
+ pm_runtime_put(vdev->device);
+ vfio_platform_regions_cleanup(vdev);
+ vfio_platform_irq_cleanup(vdev);
}
-static int vfio_platform_open(struct vfio_device *core_vdev)
+static int vfio_platform_open_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
+ const char *extra_dbg = NULL;
int ret;
- mutex_lock(&driver_lock);
-
- if (!vdev->refcnt) {
- const char *extra_dbg = NULL;
-
- ret = vfio_platform_regions_init(vdev);
- if (ret)
- goto err_reg;
+ ret = vfio_platform_regions_init(vdev);
+ if (ret)
+ return ret;
- ret = vfio_platform_irq_init(vdev);
- if (ret)
- goto err_irq;
+ ret = vfio_platform_irq_init(vdev);
+ if (ret)
+ goto err_irq;
- ret = pm_runtime_get_sync(vdev->device);
- if (ret < 0)
- goto err_rst;
+ ret = pm_runtime_get_sync(vdev->device);
+ if (ret < 0)
+ goto err_rst;
- ret = vfio_platform_call_reset(vdev, &extra_dbg);
- if (ret && vdev->reset_required) {
- dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n",
- ret, extra_dbg ? extra_dbg : "");
- goto err_rst;
- }
+ ret = vfio_platform_call_reset(vdev, &extra_dbg);
+ if (ret && vdev->reset_required) {
+		dev_warn(vdev->device,
+			 "reset driver is required and reset call failed in open (%d) %s\n",
+			 ret, extra_dbg ? extra_dbg : "");
+ goto err_rst;
}
-
- vdev->refcnt++;
-
- mutex_unlock(&driver_lock);
return 0;
err_rst:
@@ -284,8 +271,6 @@ err_rst:
vfio_platform_irq_cleanup(vdev);
err_irq:
vfio_platform_regions_cleanup(vdev);
-err_reg:
- mutex_unlock(&driver_lock);
return ret;
}
@@ -616,8 +601,8 @@ static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_stru
static const struct vfio_device_ops vfio_platform_ops = {
.name = "vfio-platform",
- .open = vfio_platform_open,
- .release = vfio_platform_release,
+ .open_device = vfio_platform_open_device,
+ .close_device = vfio_platform_close_device,
.ioctl = vfio_platform_ioctl,
.read = vfio_platform_read,
.write = vfio_platform_write,
@@ -667,7 +652,7 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
ret = vfio_platform_of_probe(vdev, dev);
if (ret)
- return ret;
+ goto out_uninit;
vdev->device = dev;
@@ -675,7 +660,7 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
if (ret && vdev->reset_required) {
dev_err(dev, "No reset function found for device %s\n",
vdev->name);
- return ret;
+ goto out_uninit;
}
group = vfio_iommu_group_get(dev);
@@ -698,6 +683,8 @@ put_iommu:
vfio_iommu_group_put(group, dev);
put_reset:
vfio_platform_put_reset(vdev);
+out_uninit:
+ vfio_uninit_group_dev(&vdev->vdev);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
@@ -708,6 +695,7 @@ void vfio_platform_remove_common(struct vfio_platform_device *vdev)
pm_runtime_disable(vdev->device);
vfio_platform_put_reset(vdev);
+ vfio_uninit_group_dev(&vdev->vdev);
vfio_iommu_group_put(vdev->vdev.dev->iommu_group, vdev->vdev.dev);
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index dfb834c13659..520d2a8e8375 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -48,7 +48,6 @@ struct vfio_platform_device {
u32 num_regions;
struct vfio_platform_irq *irqs;
u32 num_irqs;
- int refcnt;
struct mutex igate;
const char *compat;
const char *acpihid;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 02cc51ce6891..3c034fe14ccb 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -96,6 +96,79 @@ module_param_named(enable_unsafe_noiommu_mode,
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
+static DEFINE_XARRAY(vfio_device_set_xa);
+
+int vfio_assign_device_set(struct vfio_device *device, void *set_id)
+{
+ unsigned long idx = (unsigned long)set_id;
+ struct vfio_device_set *new_dev_set;
+ struct vfio_device_set *dev_set;
+
+ if (WARN_ON(!set_id))
+ return -EINVAL;
+
+ /*
+ * Atomically acquire a singleton object in the xarray for this set_id
+ */
+ xa_lock(&vfio_device_set_xa);
+ dev_set = xa_load(&vfio_device_set_xa, idx);
+ if (dev_set)
+ goto found_get_ref;
+ xa_unlock(&vfio_device_set_xa);
+
+ new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
+ if (!new_dev_set)
+ return -ENOMEM;
+ mutex_init(&new_dev_set->lock);
+ INIT_LIST_HEAD(&new_dev_set->device_list);
+ new_dev_set->set_id = set_id;
+
+ xa_lock(&vfio_device_set_xa);
+ dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
+ GFP_KERNEL);
+ if (!dev_set) {
+ dev_set = new_dev_set;
+ goto found_get_ref;
+ }
+
+ kfree(new_dev_set);
+ if (xa_is_err(dev_set)) {
+ xa_unlock(&vfio_device_set_xa);
+ return xa_err(dev_set);
+ }
+
+found_get_ref:
+ dev_set->device_count++;
+ xa_unlock(&vfio_device_set_xa);
+ mutex_lock(&dev_set->lock);
+ device->dev_set = dev_set;
+ list_add_tail(&device->dev_set_list, &dev_set->device_list);
+ mutex_unlock(&dev_set->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_assign_device_set);
+
+static void vfio_release_device_set(struct vfio_device *device)
+{
+ struct vfio_device_set *dev_set = device->dev_set;
+
+ if (!dev_set)
+ return;
+
+ mutex_lock(&dev_set->lock);
+ list_del(&device->dev_set_list);
+ mutex_unlock(&dev_set->lock);
+
+ xa_lock(&vfio_device_set_xa);
+ if (!--dev_set->device_count) {
+ __xa_erase(&vfio_device_set_xa,
+ (unsigned long)dev_set->set_id);
+ mutex_destroy(&dev_set->lock);
+ kfree(dev_set);
+ }
+ xa_unlock(&vfio_device_set_xa);
+}
+
/*
* vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
* and remove functions, any use cases other than acquiring the first
@@ -749,12 +822,25 @@ void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
}
EXPORT_SYMBOL_GPL(vfio_init_group_dev);
+void vfio_uninit_group_dev(struct vfio_device *device)
+{
+ vfio_release_device_set(device);
+}
+EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
+
int vfio_register_group_dev(struct vfio_device *device)
{
struct vfio_device *existing_device;
struct iommu_group *iommu_group;
struct vfio_group *group;
+ /*
+ * If the driver doesn't specify a set then the device is added to a
+ * singleton set just for itself.
+ */
+ if (!device->dev_set)
+ vfio_assign_device_set(device, device);
+
iommu_group = iommu_group_get(device->dev);
if (!iommu_group)
return -EINVAL;
@@ -1356,7 +1442,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
struct vfio_device *device;
struct file *filep;
- int ret;
+ int fdno;
+ int ret = 0;
if (0 == atomic_read(&group->container_users) ||
!group->container->iommu_driver || !vfio_group_viable(group))
@@ -1370,38 +1457,32 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
return PTR_ERR(device);
if (!try_module_get(device->dev->driver->owner)) {
- vfio_device_put(device);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_device_put;
}
- ret = device->ops->open(device);
- if (ret) {
- module_put(device->dev->driver->owner);
- vfio_device_put(device);
- return ret;
+ mutex_lock(&device->dev_set->lock);
+ device->open_count++;
+ if (device->open_count == 1 && device->ops->open_device) {
+ ret = device->ops->open_device(device);
+ if (ret)
+ goto err_undo_count;
}
+ mutex_unlock(&device->dev_set->lock);
/*
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0) {
- device->ops->release(device);
- module_put(device->dev->driver->owner);
- vfio_device_put(device);
- return ret;
- }
+ fdno = ret = get_unused_fd_flags(O_CLOEXEC);
+ if (ret < 0)
+ goto err_close_device;
filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
device, O_RDWR);
if (IS_ERR(filep)) {
- put_unused_fd(ret);
ret = PTR_ERR(filep);
- device->ops->release(device);
- module_put(device->dev->driver->owner);
- vfio_device_put(device);
- return ret;
+ goto err_fd;
}
/*
@@ -1413,12 +1494,25 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
atomic_inc(&group->container_users);
- fd_install(ret, filep);
+ fd_install(fdno, filep);
if (group->noiommu)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
-
+ return fdno;
+
+err_fd:
+ put_unused_fd(fdno);
+err_close_device:
+ mutex_lock(&device->dev_set->lock);
+ if (device->open_count == 1 && device->ops->close_device)
+ device->ops->close_device(device);
+err_undo_count:
+ device->open_count--;
+ mutex_unlock(&device->dev_set->lock);
+ module_put(device->dev->driver->owner);
+err_device_put:
+ vfio_device_put(device);
return ret;
}
@@ -1556,7 +1650,10 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
- device->ops->release(device);
+ mutex_lock(&device->dev_set->lock);
+ if (!--device->open_count && device->ops->close_device)
+ device->ops->close_device(device);
+ mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
@@ -2359,6 +2456,7 @@ static void __exit vfio_cleanup(void)
class_destroy(vfio.class);
vfio.class = NULL;
misc_deregister(&vfio_dev);
+ xa_destroy(&vfio_device_set_xa);
}
module_init(vfio_init);
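
The open_count bookkeeping above gives drivers a simpler contract than the old
open/release pair: open_device runs only on the first concurrent open of a
device and close_device only when the last file descriptor goes away, both
serialized under dev_set->lock. A sketch of a driver-side ops table under this
contract (all names here are illustrative, not from this patch):

	static int my_open_device(struct vfio_device *core_vdev)
	{
		/* First user of the device: bring hardware to a usable state */
		return 0;
	}

	static void my_close_device(struct vfio_device *core_vdev)
	{
		/* Last user gone: quiesce the hardware */
	}

	static const struct vfio_device_ops my_device_ops = {
		.name		= "my-vfio-driver",
		.open_device	= my_open_device,
		.close_device	= my_close_device,
	};
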
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4f7c174c7a..0e9217687f5c 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -612,17 +612,17 @@ static int vfio_wait(struct vfio_iommu *iommu)
static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start,
size_t size, struct vfio_dma **dma_p)
{
- int ret;
+ int ret = 0;
do {
*dma_p = vfio_find_dma(iommu, start, size);
if (!*dma_p)
- ret = -EINVAL;
+ return -EINVAL;
else if (!(*dma_p)->vaddr_invalid)
- ret = 0;
+ return ret;
else
ret = vfio_wait(iommu);
- } while (ret > 0);
+ } while (ret == WAITED);
return ret;
}
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 0582079e4bcc..670d56c879e5 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -36,19 +36,21 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
/**
- * vhost_iotlb_add_range - add a new range to vhost IOTLB
+ * vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
* @addr: the address that is mapped to @start
* @perm: access permission of this range
+ * @opaque: the opaque pointer for the new mapping
*
* Returns an error if @last is smaller than @start or if memory allocation
* fails
*/
-int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
- u64 start, u64 last,
- u64 addr, unsigned int perm)
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ void *opaque)
{
struct vhost_iotlb_map *map;
@@ -71,6 +73,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
map->last = last;
map->addr = addr;
map->perm = perm;
+ map->opaque = opaque;
iotlb->nmaps++;
vhost_iotlb_itree_insert(map, &iotlb->root);
@@ -80,6 +83,15 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
return 0;
}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
+
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ return vhost_iotlb_add_range_ctx(iotlb, start, last,
+ addr, perm, NULL);
+}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
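
A minimal usage sketch of the new context-carrying variant, with assumed
addresses and a caller-defined context type; the opaque pointer travels with
the map entry and can be recovered from any interval-tree lookup:

	struct my_map_ctx {
		int token;
	};

	static int my_map_with_ctx(struct vhost_iotlb *iotlb,
				   struct my_map_ctx *ctx)
	{
		int ret;

		/* Map IOVA [0x1000, 0x1fff] to address 0xa000, tagged with ctx */
		ret = vhost_iotlb_add_range_ctx(iotlb, 0x1000, 0x1fff,
						0xa000, VHOST_ACCESS_RW, ctx);
		if (ret)
			return ret;

		/* Later lookups see the same pointer via map->opaque */
		return 0;
	}
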
/**
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 46f897e41217..532e204f2b1b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,24 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
* (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
- *
* Authors: Nicholas A. Bellinger <nab@daterainc.com>
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 9479f7f79217..f41d081777f5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -116,12 +116,13 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
-static void vhost_vdpa_reset(struct vhost_vdpa *v)
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- vdpa_reset(vdpa);
v->in_batch = 0;
+
+ return vdpa_reset(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -157,7 +158,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u8 status, status_old;
- int nvqs = v->nvqs;
+ int ret, nvqs = v->nvqs;
u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
@@ -172,7 +173,12 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
return -EINVAL;
- ops->set_status(vdpa, status);
+ if (status == 0) {
+ ret = ops->reset(vdpa);
+ if (ret)
+ return ret;
+	} else {
+		ops->set_status(vdpa, status);
+	}
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
@@ -498,7 +504,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r;
}
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
@@ -507,19 +513,44 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
- pinned = map->size >> PAGE_SHIFT;
- for (pfn = map->addr >> PAGE_SHIFT;
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
- atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_iotlb_map_free(iotlb, map);
}
}
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_unmap(v, start, last);
+
+ return vhost_vdpa_pa_unmap(v, start, last);
+}
+
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
struct vhost_dev *dev = &v->vdev;
@@ -551,21 +582,21 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE;
}
-static int vhost_vdpa_map(struct vhost_vdpa *v,
- u64 iova, u64 size, u64 pa, u32 perm)
+static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
+ u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
- r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
- pa, perm);
+ r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+ pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
- r = ops->dma_map(vdpa, iova, size, pa, perm);
+ r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
r = ops->set_map(vdpa, dev->iotlb);
@@ -573,13 +604,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
}
-
- if (r)
+ if (r) {
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
- else
- atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ return r;
+ }
- return r;
+ if (!vdpa->use_va)
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+
+ return 0;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
@@ -600,38 +633,78 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
}
}
-static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
- struct vhost_iotlb_msg *msg)
+static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ u64 offset, map_size, map_iova = iova;
+ struct vdpa_map_file *map_file;
+ struct vm_area_struct *vma;
+	int ret = 0;
+
+ mmap_read_lock(dev->mm);
+
+ while (size) {
+ vma = find_vma(dev->mm, uaddr);
+ if (!vma) {
+ ret = -EINVAL;
+ break;
+ }
+ map_size = min(size, vma->vm_end - uaddr);
+ if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
+ !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
+ goto next;
+
+ map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
+ if (!map_file) {
+ ret = -ENOMEM;
+ break;
+ }
+ offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
+ map_file->offset = offset;
+ map_file->file = get_file(vma->vm_file);
+ ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+ perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ break;
+ }
+next:
+ size -= map_size;
+ uaddr += map_size;
+ map_iova += map_size;
+ }
+ if (ret)
+ vhost_vdpa_unmap(v, iova, map_iova - iova);
+
+ mmap_read_unlock(dev->mm);
+
+ return ret;
+}
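
The file offset recorded for each chunk above is derived purely from the VMA
geometry; a worked example with assumed numbers may help:

	/*
	 * With 4 KiB pages, take a VMA with vm_start = 0x7f0000001000 and
	 * vm_pgoff = 4 (i.e. the VMA begins 16384 bytes into the file).
	 * For uaddr = 0x7f0000003000:
	 *
	 *   offset = (4 << 12) + 0x7f0000003000 - 0x7f0000001000
	 *          = 16384 + 8192
	 *          = 24576
	 *
	 * The address is 8 KiB into the VMA and therefore 24 KiB into the
	 * backing file, which is what ends up in map_file->offset.
	 */
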
+
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
unsigned long npages, cur_base, map_pfn, last_pfn = 0;
unsigned long lock_limit, sz2pin, nchunks, i;
- u64 iova = msg->iova;
+ u64 start = iova;
long pinned;
int ret = 0;
- if (msg->iova < v->range.first || !msg->size ||
- msg->iova > U64_MAX - msg->size + 1 ||
- msg->iova + msg->size - 1 > v->range.last)
- return -EINVAL;
-
- if (vhost_iotlb_itree_first(iotlb, msg->iova,
- msg->iova + msg->size - 1))
- return -EEXIST;
-
/* Limit the use of memory for bookkeeping */
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list)
return -ENOMEM;
- if (msg->perm & VHOST_ACCESS_WO)
+ if (perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
- npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ npages = PFN_UP(size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
@@ -639,13 +712,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
mmap_read_lock(dev->mm);
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
}
- cur_base = msg->uaddr & PAGE_MASK;
+ cur_base = uaddr & PAGE_MASK;
iova &= PAGE_MASK;
nchunks = 0;
@@ -673,10 +746,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
- csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize,
- map_pfn << PAGE_SHIFT,
- msg->perm);
+ PFN_PHYS(map_pfn),
+ perm, NULL);
if (ret) {
/*
* Unpin the pages that are left unmapped
@@ -699,13 +772,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
last_pfn = this_pfn;
}
- cur_base += pinned << PAGE_SHIFT;
+ cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
	/* Pin the remaining chunk */
- ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT, msg->perm);
+ ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
if (nchunks) {
@@ -724,13 +797,38 @@ out:
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
- vhost_vdpa_unmap(v, msg->iova, msg->size);
+ vhost_vdpa_unmap(v, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
free:
free_page((unsigned long)page_list);
return ret;
+
+}
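
The PFN_DOWN/PFN_UP/PFN_PHYS conversions introduced throughout this hunk are
the generic helpers from <linux/pfn.h>; they are drop-in equivalents for the
open-coded shifts they replace:

	/*
	 *   PFN_DOWN(x)  ==  (x) >> PAGE_SHIFT                    round down to a frame
	 *   PFN_UP(x)    ==  ((x) + PAGE_SIZE - 1) >> PAGE_SHIFT  round up to a frame
	 *   PFN_PHYS(x)  ==  (phys_addr_t)(x) << PAGE_SHIFT       frame to address
	 */
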
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_map(v, msg->iova, msg->size,
+ msg->uaddr, msg->perm);
+
+ return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+ msg->perm);
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
@@ -860,7 +958,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
return -EBUSY;
nvqs = v->nvqs;
- vhost_vdpa_reset(v);
+ r = vhost_vdpa_reset(v);
+ if (r)
+ goto err;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
@@ -945,7 +1045,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f249622ef11b..938aefbc75ec 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
- bool restore_flag = false;
+ u32 flags_to_restore = 0;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -178,16 +178,21 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* small rx buffers, headers of packets in rx queue are
* created dynamically and are initialized with header
* of current packet(except length). But in case of
- * SOCK_SEQPACKET, we also must clear record delimeter
- * bit(VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
- * packet with delimeter(which marks end of record),
- * there will be sequence of packets with delimeter
- * bit set. After initialized header will be copied to
- * rx buffer, this bit will be restored.
+ * SOCK_SEQPACKET, we must also clear the message delimiter
+ * bit (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
+ * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, there will be
+ * a sequence of packets with these bits set. After the
+ * initialized header has been copied to the rx buffer,
+ * these bits will be restored.
*/
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
- restore_flag = true;
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
}
}
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
- if (restore_flag)
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
index a7df5bcca9da..37aa5a669530 100644
--- a/drivers/video/backlight/ktd253-backlight.c
+++ b/drivers/video/backlight/ktd253-backlight.c
@@ -25,6 +25,7 @@
#define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
#define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
+#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us; a low period this long is already visible */
#define KTD253_T_OFF_MS 3
struct ktd253_backlight {
@@ -34,13 +35,50 @@ struct ktd253_backlight {
u16 ratio;
};
+static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
+{
+ gpiod_set_value_cansleep(ktd253->gpiod, 1);
+ ndelay(KTD253_T_HIGH_NS);
+ /* We always fall back to this when we power on */
+}
+
+static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
+{
+ /*
+ * These GPIO operations absolutely can NOT sleep so no _cansleep
+ * suffixes, and no using GPIO expanders on slow buses for this!
+ *
+ * The maximum number of cycles of the loop is 32 so the time taken
+ * should nominally be:
+ * (T_LOW_NS + T_HIGH_NS + loop_time) * 32
+ *
+ * Architectures do not always support ndelay() and we will then get a
+ * few us of delay instead. If we hit the critical time limit, an
+ * interrupt has likely occurred in the low part of the loop and we
+ * need to restart from the top so the backlight is in a known state.
+ */
+ u64 ns;
+
+ ns = ktime_get_ns();
+ gpiod_set_value(ktd253->gpiod, 0);
+ ndelay(KTD253_T_LOW_NS);
+ gpiod_set_value(ktd253->gpiod, 1);
+ ns = ktime_get_ns() - ns;
+ if (ns >= KTD253_T_OFF_CRIT_NS) {
+ dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
+ return -EAGAIN;
+ }
+ ndelay(KTD253_T_HIGH_NS);
+ return 0;
+}
+
static int ktd253_backlight_update_status(struct backlight_device *bl)
{
struct ktd253_backlight *ktd253 = bl_get_data(bl);
int brightness = backlight_get_brightness(bl);
u16 target_ratio;
u16 current_ratio = ktd253->ratio;
- unsigned long flags;
+ int ret;
dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl)
}
if (current_ratio == 0) {
- gpiod_set_value_cansleep(ktd253->gpiod, 1);
- ndelay(KTD253_T_HIGH_NS);
- /* We always fall back to this when we power on */
+ ktd253_backlight_set_max_ratio(ktd253);
current_ratio = KTD253_MAX_RATIO;
}
- /*
- * WARNING:
- * The loop to set the correct current level is performed
- * with interrupts disabled as it is timing critical.
- * The maximum number of cycles of the loop is 32
- * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32,
- */
- local_irq_save(flags);
while (current_ratio != target_ratio) {
/*
* These GPIO operations absolutely can NOT sleep so no
* _cansleep suffixes, and no using GPIO expanders on
* slow buses for this!
*/
- gpiod_set_value(ktd253->gpiod, 0);
- ndelay(KTD253_T_LOW_NS);
- gpiod_set_value(ktd253->gpiod, 1);
- ndelay(KTD253_T_HIGH_NS);
- /* After 1/32 we loop back to 32/32 */
- if (current_ratio == KTD253_MIN_RATIO)
+ ret = ktd253_backlight_stepdown(ktd253);
+ if (ret == -EAGAIN) {
+ /*
+ * Something disturbed the backlight setting code while it
+ * was running, so bring the PWM back to a known state.
+ * This should not happen often.
+ */
+ gpiod_set_value_cansleep(ktd253->gpiod, 0);
+ msleep(KTD253_T_OFF_MS);
+ ktd253_backlight_set_max_ratio(ktd253);
+ current_ratio = KTD253_MAX_RATIO;
+ } else if (current_ratio == KTD253_MIN_RATIO) {
+ /* After 1/32 we loop back to 32/32 */
current_ratio = KTD253_MAX_RATIO;
- else
+ } else {
current_ratio--;
+ }
}
- local_irq_restore(flags);
ktd253->ratio = current_ratio;
dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
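
Instead of looping with interrupts disabled, the rework times each critical pulse and retries from a known state when something stretched it. A hedged userspace sketch of that measure-and-retry step, with clock_gettime() standing in for ktime_get_ns() and do_pulse() for the GPIO toggling:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define T_OFF_CRIT_NS 100000ULL	/* assumed limit, mirrors KTD253_T_OFF_CRIT_NS */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void do_pulse(void)
{
	/* stand-in for the low/high GPIO toggling */
}

/* One timing-critical step: fail with -1 if it took too long, so the
 * caller can reset the hardware to a known state and start over. */
static int timed_step(void)
{
	uint64_t ns = now_ns();

	do_pulse();
	ns = now_ns() - ns;
	return ns >= T_OFF_CRIT_NS ? -1 : 0;
}

int main(void)
{
	printf("step: %d\n", timed_step());
	return 0;
}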
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index e48fded3e414..8d8959a70e44 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -409,6 +409,33 @@ static bool pwm_backlight_is_linear(struct platform_pwm_backlight_data *data)
static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
{
struct device_node *node = pb->dev->of_node;
+ bool active = true;
+
+ /*
+ * If the enable GPIO is present, observable (either as input
+ * or output), and off, then the backlight is not currently active.
+ */
+ if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
+ active = false;
+
+ if (!regulator_is_enabled(pb->power_supply))
+ active = false;
+
+ if (!pwm_is_enabled(pb->pwm))
+ active = false;
+
+ /*
+ * Synchronize the enable_gpio with the observed state of the
+ * hardware.
+ */
+ if (pb->enable_gpio)
+ gpiod_direction_output(pb->enable_gpio, active);
+
+ /*
+ * Do not change pb->enabled here! pb->enabled essentially
+ * tells us if we own one of the regulator's use counts and
+ * right now we do not.
+ */
/* Not booted with device tree or no phandle link to the node */
if (!node || !node->phandle)
@@ -420,20 +447,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
* assume that another driver will enable the backlight at the
* appropriate time. Therefore, if it is disabled, keep it so.
*/
-
- /* if the enable GPIO is disabled, do not enable the backlight */
- if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
- return FB_BLANK_POWERDOWN;
-
- /* The regulator is disabled, do not enable the backlight */
- if (!regulator_is_enabled(pb->power_supply))
- return FB_BLANK_POWERDOWN;
-
- /* The PWM is disabled, keep it like this */
- if (!pwm_is_enabled(pb->pwm))
- return FB_BLANK_POWERDOWN;
-
- return FB_BLANK_UNBLANK;
+ return active ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
static int pwm_backlight_probe(struct platform_device *pdev)
@@ -486,18 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- /*
- * If the GPIO is not known to be already configured as output, that
- * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
- * direction to output and set the GPIO as active.
- * Do not force the GPIO to active when it was already output as it
- * could cause backlight flickering or we would enable the backlight too
- * early. Leave the decision of the initial backlight state for later.
- */
- if (pb->enable_gpio &&
- gpiod_get_direction(pb->enable_gpio) != 0)
- gpiod_direction_output(pb->enable_gpio, 1);
-
pb->power_supply = devm_regulator_get(&pdev->dev, "power");
if (IS_ERR(pb->power_supply)) {
ret = PTR_ERR(pb->power_supply);
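
The initial power state is now derived purely from observable hardware: the backlight counts as active only if the enable GPIO, the supply regulator, and the PWM all agree, and the GPIO direction is then synchronized to that result. A trivial sketch of the conjunction (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* The initial state is "active" only if every observable enable source
 * is on; any single source being off forces inactive. */
static bool initially_active(bool gpio_high, bool regulator_on, bool pwm_on)
{
	return gpio_high && regulator_on && pwm_on;
}

int main(void)
{
	printf("%d\n", initially_active(true, true, false)); /* 0: stay off */
	return 0;
}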
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 71fb710f1ce3..7420d2c16e47 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
struct fb_event event;
+ u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
if (var->xres < 8 || var->yres < 8)
return -EINVAL;
+ /* A resolution this large would overflow the multiplication. */
+ if (check_mul_overflow(var->xres, var->yres, &unused) ||
+ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
+ return -EINVAL;
+
ret = info->fbops->fb_check_var(var, info);
if (ret)
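
The added guard rejects any mode whose xres*yres product would wrap a u32 before the driver's fb_check_var() ever sees it. The same check in portable userspace C, using the compiler builtin that check_mul_overflow() maps to on recent toolchains:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's check_mul_overflow(): returns
 * nonzero if a * b does not fit in a u32. */
static int mul_overflows_u32(uint32_t a, uint32_t b, uint32_t *res)
{
	return __builtin_mul_overflow(a, b, res);
}

int main(void)
{
	uint32_t unused;

	/* 0x20000 * 0x20000 = 2^34, overflows 32 bits: reject the mode. */
	printf("%d\n", mul_overflows_u32(0x20000, 0x20000, &unused)); /* 1 */
	printf("%d\n", mul_overflows_u32(1920, 1080, &unused));       /* 0 */
	return 0;
}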
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 1ea0c1f6a1fd..588e02fb91d3 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -4,6 +4,7 @@
#include <linux/virtio_config.h>
#include <linux/module.h>
#include <linux/idr.h>
+#include <linux/of.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@@ -292,6 +293,8 @@ static void virtio_dev_remove(struct device *_d)
/* Acknowledge the device's existence again. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+
+ of_node_put(dev->dev.of_node);
}
static struct bus_type virtio_bus = {
@@ -318,6 +321,43 @@ void unregister_virtio_driver(struct virtio_driver *driver)
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);
+static int virtio_device_of_init(struct virtio_device *dev)
+{
+ struct device_node *np, *pnode = dev_of_node(dev->dev.parent);
+ char compat[] = "virtio,deviceXXXXXXXX";
+ int ret, count;
+
+ if (!pnode)
+ return 0;
+
+ count = of_get_available_child_count(pnode);
+ if (!count)
+ return 0;
+
+ /* There can be only 1 child node */
+ if (WARN_ON(count > 1))
+ return -EINVAL;
+
+ np = of_get_next_available_child(pnode, NULL);
+ if (WARN_ON(!np))
+ return -ENODEV;
+
+ ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
+ BUG_ON(ret >= sizeof(compat));
+
+ if (!of_device_is_compatible(np, compat)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev->dev.of_node = np;
+ return 0;
+
+out:
+ of_node_put(np);
+ return ret;
+}
+
/**
* register_virtio_device - register virtio device
* @dev : virtio device to be registered
@@ -342,6 +382,10 @@ int register_virtio_device(struct virtio_device *dev)
dev->index = err;
dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = virtio_device_of_init(dev);
+ if (err)
+ goto out_ida_remove;
+
spin_lock_init(&dev->config_lock);
dev->config_enabled = false;
dev->config_change_pending = false;
@@ -362,10 +406,16 @@ int register_virtio_device(struct virtio_device *dev)
*/
err = device_add(&dev->dev);
if (err)
- ida_simple_remove(&virtio_index_ida, dev->index);
+ goto out_of_node_put;
+
+ return 0;
+
+out_of_node_put:
+ of_node_put(dev->dev.of_node);
+out_ida_remove:
+ ida_simple_remove(&virtio_index_ida, dev->index);
out:
- if (err)
- virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);
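
The reworked error path unwinds strictly in reverse order of acquisition and marks the device FAILED on every exit. A self-contained sketch of the goto-unwind pattern; all the helpers are stubs:

#include <stdio.h>

static int acquire_index(void)    { return 0; }  /* e.g. ida allocation */
static int acquire_of_node(void)  { return 0; }  /* e.g. of_node refcount */
static int add_device(void)       { return -1; } /* force the unwind path */
static void release_of_node(void) { puts("put of_node"); }
static void release_index(void)   { puts("remove ida index"); }
static void mark_failed(void)     { puts("set FAILED status"); }

static int register_thing(void)
{
	int err;

	err = acquire_index();
	if (err)
		goto out;
	err = acquire_of_node();
	if (err)
		goto out_release_index;
	err = add_device();
	if (err)
		goto out_release_of_node;
	return 0;

out_release_of_node:
	release_of_node();
out_release_index:
	release_index();
out:
	mark_failed();
	return err;
}

int main(void)
{
	return register_thing() ? 1 : 0;
}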
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 47dce91f788c..c22ff0117b46 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -531,8 +531,8 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
}
- err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
- vqs, callbacks, names, NULL, NULL);
+ err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
+ callbacks, names, NULL);
if (err)
return err;
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index b91bc810a87e..bef8ad6bf466 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -143,6 +143,8 @@ struct virtio_mem {
* add_memory_driver_managed().
*/
const char *resource_name;
+ /* Memory group identification. */
+ int mgid;
/*
* We don't want to add too much memory if it's not getting onlined,
@@ -626,8 +628,8 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
addr + size - 1);
/* Memory might get onlined immediately. */
atomic64_add(size, &vm->offline_size);
- rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
- MHP_MERGE_RESOURCE);
+ rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
+ MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
if (rc) {
atomic64_sub(size, &vm->offline_size);
dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
@@ -677,7 +679,7 @@ static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = remove_memory(vm->nid, addr, size);
+ rc = remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -720,7 +722,7 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = offline_and_remove_memory(vm->nid, addr, size);
+ rc = offline_and_remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -2569,6 +2571,7 @@ static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
+ uint64_t unit_pages;
int rc;
BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
@@ -2603,6 +2606,16 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
+ /* use a single dynamic memory group to cover the whole memory device */
+ if (vm->in_sbm)
+ unit_pages = PHYS_PFN(memory_block_size_bytes());
+ else
+ unit_pages = PHYS_PFN(vm->bbm.bb_size);
+ rc = memory_group_register_dynamic(vm->nid, unit_pages);
+ if (rc < 0)
+ goto out_del_resource;
+ vm->mgid = rc;
+
/*
* If we still have memory plugged, we have to unplug all memory first.
* Registering our parent resource makes sure that this memory isn't
@@ -2617,7 +2630,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
- goto out_del_resource;
+ goto out_unreg_group;
rc = register_virtio_mem_device(vm);
if (rc)
goto out_unreg_mem;
@@ -2631,6 +2644,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
return 0;
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
+out_unreg_group:
+ memory_group_unregister(vm->mgid);
out_del_resource:
virtio_mem_delete_resource(vm);
out_del_vq:
@@ -2695,6 +2710,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
} else {
virtio_mem_delete_resource(vm);
kfree_const(vm->resource_name);
+ memory_group_unregister(vm->mgid);
}
/* remove all tracking data - no locking needed */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0bc7046ab942..b81fe4f7d434 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -168,18 +168,6 @@ config SOFT_WATCHDOG_PRETIMEOUT
watchdog. Be aware that governors might affect the watchdog because it
is purely software, e.g. the panic governor will stall it!
-config BD70528_WATCHDOG
- tristate "ROHM BD70528 PMIC Watchdog"
- depends on MFD_ROHM_BD70528
- select WATCHDOG_CORE
- help
- Support for the watchdog in the ROHM BD70528 PMIC. Watchdog trigger
- cause system reset.
-
- Say Y here to include support for the ROHM BD70528 watchdog.
- Alternatively say M to compile the driver as a module,
- which will be called bd70528_wdt.
-
config BD957XMUF_WATCHDOG
tristate "ROHM BD9576MUF and BD9573MUF PMIC Watchdog"
depends on MFD_ROHM_BD957XMUF
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index abaf2ebd814e..1bd2d6f37c53 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -204,7 +204,6 @@ obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
-obj-$(CONFIG_BD70528_WATCHDOG) += bd70528_wdt.o
obj-$(CONFIG_BD957XMUF_WATCHDOG) += bd9576_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index dec6ca019bea..94907176a0e4 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -205,9 +205,13 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
if (err)
return err;
- if (pm_power_off == NULL) {
- pm_power_off = bcm2835_power_off;
- bcm2835_power_off_wdt = wdt;
+ if (of_device_is_system_power_controller(pdev->dev.parent->of_node)) {
+ if (!pm_power_off) {
+ pm_power_off = bcm2835_power_off;
+ bcm2835_power_off_wdt = wdt;
+ } else {
+ dev_info(dev, "Poweroff handler already present!\n");
+ }
}
dev_info(dev, "Broadcom BCM2835 watchdog timer");
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
deleted file mode 100644
index 0170b37e6674..000000000000
--- a/drivers/watchdog/bd70528_wdt.c
+++ /dev/null
@@ -1,291 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 ROHM Semiconductors
-// ROHM BD70528MWV watchdog driver
-
-#include <linux/bcd.h>
-#include <linux/kernel.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/watchdog.h>
-
-/*
- * Max time we can set is 1 hour, 59 minutes and 59 seconds
- * and Minimum time is 1 second
- */
-#define WDT_MAX_MS ((2 * 60 * 60 - 1) * 1000)
-#define WDT_MIN_MS 1000
-#define DEFAULT_TIMEOUT 60
-
-#define WD_CTRL_MAGIC1 0x55
-#define WD_CTRL_MAGIC2 0xAA
-
-struct wdtbd70528 {
- struct device *dev;
- struct regmap *regmap;
- struct rohm_regmap_dev *mfd;
- struct watchdog_device wdt;
-};
-
-/**
- * bd70528_wdt_set - arm or disarm watchdog timer
- *
- * @data: device data for the PMIC instance we want to operate on
- * @enable: new state of WDT. zero to disable, non zero to enable
- * @old_state: previous state of WDT will be filled here
- *
- * Arm or disarm WDT on BD70528 PMIC. Expected to be called only by
- * BD70528 RTC and BD70528 WDT drivers. The rtc_timer_lock must be taken
- * by calling bd70528_wdt_lock before calling bd70528_wdt_set.
- */
-int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state)
-{
- int ret, i;
- unsigned int tmp;
- struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
- chip);
- u8 wd_ctrl_arr[3] = { WD_CTRL_MAGIC1, WD_CTRL_MAGIC2, 0 };
- u8 *wd_ctrl = &wd_ctrl_arr[2];
-
- ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
- if (ret)
- return ret;
-
- *wd_ctrl = (u8)tmp;
-
- if (old_state) {
- if (*wd_ctrl & BD70528_MASK_WDT_EN)
- *old_state |= BD70528_WDT_STATE_BIT;
- else
- *old_state &= ~BD70528_WDT_STATE_BIT;
- if ((!enable) == (!(*old_state & BD70528_WDT_STATE_BIT)))
- return 0;
- }
-
- if (enable) {
- if (*wd_ctrl & BD70528_MASK_WDT_EN)
- return 0;
- *wd_ctrl |= BD70528_MASK_WDT_EN;
- } else {
- if (*wd_ctrl & BD70528_MASK_WDT_EN)
- *wd_ctrl &= ~BD70528_MASK_WDT_EN;
- else
- return 0;
- }
-
- for (i = 0; i < 3; i++) {
- ret = regmap_write(bd70528->chip.regmap, BD70528_REG_WDT_CTRL,
- wd_ctrl_arr[i]);
- if (ret)
- return ret;
- }
-
- ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
- if ((tmp & BD70528_MASK_WDT_EN) != (*wd_ctrl & BD70528_MASK_WDT_EN)) {
- dev_err(bd70528->chip.dev,
- "Watchdog ctrl mismatch (hw) 0x%x (set) 0x%x\n",
- tmp, *wd_ctrl);
- ret = -EIO;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(bd70528_wdt_set);
-
-/**
- * bd70528_wdt_lock - take WDT lock
- *
- * @data: device data for the PMIC instance we want to operate on
- *
- * Lock WDT for arming/disarming in order to avoid race condition caused
- * by WDT state changes initiated by WDT and RTC drivers.
- */
-void bd70528_wdt_lock(struct rohm_regmap_dev *data)
-{
- struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
- chip);
-
- mutex_lock(&bd70528->rtc_timer_lock);
-}
-EXPORT_SYMBOL(bd70528_wdt_lock);
-
-/**
- * bd70528_wdt_unlock - unlock WDT lock
- *
- * @data: device data for the PMIC instance we want to operate on
- *
- * Unlock WDT lock which has previously been taken by call to
- * bd70528_wdt_lock.
- */
-void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
-{
- struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
- chip);
-
- mutex_unlock(&bd70528->rtc_timer_lock);
-}
-EXPORT_SYMBOL(bd70528_wdt_unlock);
-
-static int bd70528_wdt_set_locked(struct wdtbd70528 *w, int enable)
-{
- return bd70528_wdt_set(w->mfd, enable, NULL);
-}
-
-static int bd70528_wdt_change(struct wdtbd70528 *w, int enable)
-{
- int ret;
-
- bd70528_wdt_lock(w->mfd);
- ret = bd70528_wdt_set_locked(w, enable);
- bd70528_wdt_unlock(w->mfd);
-
- return ret;
-}
-
-static int bd70528_wdt_start(struct watchdog_device *wdt)
-{
- struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
-
- dev_dbg(w->dev, "WDT ping...\n");
- return bd70528_wdt_change(w, 1);
-}
-
-static int bd70528_wdt_stop(struct watchdog_device *wdt)
-{
- struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
-
- dev_dbg(w->dev, "WDT stopping...\n");
- return bd70528_wdt_change(w, 0);
-}
-
-static int bd70528_wdt_set_timeout(struct watchdog_device *wdt,
- unsigned int timeout)
-{
- unsigned int hours;
- unsigned int minutes;
- unsigned int seconds;
- int ret;
- struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
-
- seconds = timeout;
- hours = timeout / (60 * 60);
- /* Maximum timeout is 1h 59m 59s => hours is 1 or 0 */
- if (hours)
- seconds -= (60 * 60);
- minutes = seconds / 60;
- seconds = seconds % 60;
-
- bd70528_wdt_lock(w->mfd);
-
- ret = bd70528_wdt_set_locked(w, 0);
- if (ret)
- goto out_unlock;
-
- ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_HOUR,
- BD70528_MASK_WDT_HOUR, hours);
- if (ret) {
- dev_err(w->dev, "Failed to set WDT hours\n");
- goto out_en_unlock;
- }
- ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_MINUTE,
- BD70528_MASK_WDT_MINUTE, bin2bcd(minutes));
- if (ret) {
- dev_err(w->dev, "Failed to set WDT minutes\n");
- goto out_en_unlock;
- }
- ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_SEC,
- BD70528_MASK_WDT_SEC, bin2bcd(seconds));
- if (ret)
- dev_err(w->dev, "Failed to set WDT seconds\n");
- else
- dev_dbg(w->dev, "WDT tmo set to %u\n", timeout);
-
-out_en_unlock:
- ret = bd70528_wdt_set_locked(w, 1);
-out_unlock:
- bd70528_wdt_unlock(w->mfd);
-
- return ret;
-}
-
-static const struct watchdog_info bd70528_wdt_info = {
- .identity = "bd70528-wdt",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
-};
-
-static const struct watchdog_ops bd70528_wdt_ops = {
- .start = bd70528_wdt_start,
- .stop = bd70528_wdt_stop,
- .set_timeout = bd70528_wdt_set_timeout,
-};
-
-static int bd70528_wdt_probe(struct platform_device *pdev)
-{
- struct rohm_regmap_dev *bd70528;
- struct wdtbd70528 *w;
- int ret;
- unsigned int reg;
-
- bd70528 = dev_get_drvdata(pdev->dev.parent);
- if (!bd70528) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
- w = devm_kzalloc(&pdev->dev, sizeof(*w), GFP_KERNEL);
- if (!w)
- return -ENOMEM;
-
- w->regmap = bd70528->regmap;
- w->mfd = bd70528;
- w->dev = &pdev->dev;
-
- w->wdt.info = &bd70528_wdt_info;
- w->wdt.ops = &bd70528_wdt_ops;
- w->wdt.min_hw_heartbeat_ms = WDT_MIN_MS;
- w->wdt.max_hw_heartbeat_ms = WDT_MAX_MS;
- w->wdt.parent = pdev->dev.parent;
- w->wdt.timeout = DEFAULT_TIMEOUT;
- watchdog_set_drvdata(&w->wdt, w);
- watchdog_init_timeout(&w->wdt, 0, pdev->dev.parent);
-
- ret = bd70528_wdt_set_timeout(&w->wdt, w->wdt.timeout);
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the watchdog timeout\n");
- return ret;
- }
-
- bd70528_wdt_lock(w->mfd);
- ret = regmap_read(w->regmap, BD70528_REG_WDT_CTRL, &reg);
- bd70528_wdt_unlock(w->mfd);
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to get the watchdog state\n");
- return ret;
- }
- if (reg & BD70528_MASK_WDT_EN) {
- dev_dbg(&pdev->dev, "watchdog was running during probe\n");
- set_bit(WDOG_HW_RUNNING, &w->wdt.status);
- }
-
- ret = devm_watchdog_register_device(&pdev->dev, &w->wdt);
- if (ret < 0)
- dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
-
- return ret;
-}
-
-static struct platform_driver bd70528_wdt = {
- .driver = {
- .name = "bd70528-wdt"
- },
- .probe = bd70528_wdt_probe,
-};
-
-module_platform_driver(bd70528_wdt);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 watchdog driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bd70528-wdt");
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index b3f604669e2c..643c6c2d0b72 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -362,7 +362,7 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
* Otherwise, the BIOS generally reboots when the SMI triggers.
*/
if (p->smi_res &&
- (SMI_EN(p) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+ (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
tmrval /= 2;
/* from the specs: */
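
The iTCO fix addresses a classic bug class: SMI_EN(p) yields an I/O port address, and testing bits on the address is meaningless until inl() reads the register behind it. A userspace sketch of the bug and the fix (read_reg() stands in for inl(); the bit values are made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(uint32_t port)
{
	(void)port;
	return 0x3; /* pretend TCO_EN | GBL_SMI_EN are set in the register */
}

int main(void)
{
	uint32_t port = 0x430, mask = 0x3;

	/* Wrong: tests bits of the address itself. */
	printf("buggy: %d\n", (port & mask) == mask);
	/* Right: read the register, then test its contents. */
	printf("fixed: %d\n", (read_reg(port) & mask) == mask);
	return 0;
}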
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index cc86018c5eb5..51bfb796898b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -317,6 +317,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdog, nowayout);
watchdog_set_restart_priority(wdog, 128);
watchdog_init_timeout(wdog, timeout, dev);
+ watchdog_stop_ping_on_suspend(wdog);
if (imx2_wdt_is_running(wdev)) {
imx2_wdt_set_timeout(wdog, wdog->timeout);
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 3a899628a834..9e1541cfae0d 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/property.h>
#define DEFAULT_HEARTBEAT 60
#define MAX_HEARTBEAT 60
@@ -99,8 +100,8 @@ static const struct max63xx_timeout max6373_table[] = {
{ },
};
-static struct max63xx_timeout *
-max63xx_select_timeout(struct max63xx_timeout *table, int value)
+static const struct max63xx_timeout *
+max63xx_select_timeout(const struct max63xx_timeout *table, int value)
{
while (table->twd) {
if (value <= table->twd) {
@@ -202,14 +203,17 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct max63xx_wdt *wdt;
- struct max63xx_timeout *table;
+ const struct max63xx_timeout *table;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- table = (struct max63xx_timeout *)pdev->id_entry->driver_data;
+ /* Attempt to use fwnode first */
+ table = device_get_match_data(dev);
+ if (!table)
+ table = (struct max63xx_timeout *)pdev->id_entry->driver_data;
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
@@ -255,11 +259,23 @@ static const struct platform_device_id max63xx_id_table[] = {
};
MODULE_DEVICE_TABLE(platform, max63xx_id_table);
+static const struct of_device_id max63xx_dt_id_table[] = {
+ { .compatible = "maxim,max6369", .data = max6369_table, },
+ { .compatible = "maxim,max6370", .data = max6369_table, },
+ { .compatible = "maxim,max6371", .data = max6371_table, },
+ { .compatible = "maxim,max6372", .data = max6371_table, },
+ { .compatible = "maxim,max6373", .data = max6373_table, },
+ { .compatible = "maxim,max6374", .data = max6373_table, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max63xx_dt_id_table);
+
static struct platform_driver max63xx_wdt_driver = {
.probe = max63xx_wdt_probe,
.id_table = max63xx_id_table,
.driver = {
.name = "max63xx_wdt",
+ .of_match_table = max63xx_dt_id_table,
},
};
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 2f7ded32e878..1c569be72ea2 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -118,7 +118,7 @@ static struct watchdog_info mpc8xxx_wdt_info = {
.identity = "MPC8xxx",
};
-static struct watchdog_ops mpc8xxx_wdt_ops = {
+static const struct watchdog_ops mpc8xxx_wdt_ops = {
.owner = THIS_MODULE,
.start = mpc8xxx_wdt_start,
.ping = mpc8xxx_wdt_ping,
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 16b6aff324a7..796fbb048cbe 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -12,6 +12,7 @@
#include <dt-bindings/reset-controller/mt2712-resets.h>
#include <dt-bindings/reset-controller/mt8183-resets.h>
#include <dt-bindings/reset-controller/mt8192-resets.h>
+#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -82,6 +83,10 @@ static const struct mtk_wdt_data mt8192_data = {
.toprgu_sw_rst_num = MT8192_TOPRGU_SW_RST_NUM,
};
+static const struct mtk_wdt_data mt8195_data = {
+ .toprgu_sw_rst_num = MT8195_TOPRGU_SW_RST_NUM,
+};
+
static int toprgu_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
@@ -408,6 +413,7 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt6589-wdt" },
{ .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
{ .compatible = "mediatek,mt8192-wdt", .data = &mt8192_data },
+ { .compatible = "mediatek,mt8195-wdt", .data = &mt8195_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
diff --git a/drivers/watchdog/sl28cpld_wdt.c b/drivers/watchdog/sl28cpld_wdt.c
index 2de93298475f..9ce456f09f73 100644
--- a/drivers/watchdog/sl28cpld_wdt.c
+++ b/drivers/watchdog/sl28cpld_wdt.c
@@ -108,7 +108,7 @@ static const struct watchdog_info sl28cpld_wdt_info = {
.identity = "sl28cpld watchdog",
};
-static struct watchdog_ops sl28cpld_wdt_ops = {
+static const struct watchdog_ops sl28cpld_wdt_ops = {
.owner = THIS_MODULE,
.start = sl28cpld_wdt_start,
.stop = sl28cpld_wdt_stop,
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 72d0b0adde38..83860e94ce9d 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -62,7 +62,7 @@ static const struct watchdog_info tqmx86_wdt_info = {
.identity = "TQMx86 Watchdog",
};
-static struct watchdog_ops tqmx86_wdt_ops = {
+static const struct watchdog_ops tqmx86_wdt_ops = {
.owner = THIS_MODULE,
.start = tqmx86_wdt_start,
.set_timeout = tqmx86_wdt_set_timeout,
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 5df0a22e2cb4..3fe8a7edc252 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,6 +34,7 @@
#include <linux/idr.h> /* For ida_* macros */
#include <linux/err.h> /* For IS_ERR macros */
#include <linux/of.h> /* For of_get_timeout_sec */
+#include <linux/suspend.h>
#include "watchdog_core.h" /* For watchdog_dev_register/... */
@@ -185,6 +186,33 @@ static int watchdog_restart_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static int watchdog_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
+{
+ struct watchdog_device *wdd;
+ int ret = 0;
+
+ wdd = container_of(nb, struct watchdog_device, pm_nb);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = watchdog_dev_suspend(wdd);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = watchdog_dev_resume(wdd);
+ break;
+ }
+
+ if (ret)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
/**
* watchdog_set_restart_priority - Change priority of restart handler
* @wdd: watchdog device
@@ -292,6 +320,15 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
wdd->id, ret);
}
+ if (test_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status)) {
+ wdd->pm_nb.notifier_call = watchdog_pm_notifier;
+
+ ret = register_pm_notifier(&wdd->pm_nb);
+ if (ret)
+ pr_warn("watchdog%d: Cannot register pm handler (%d)\n",
+ wdd->id, ret);
+ }
+
return 0;
}
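
The PM notifier ties each watchdog into system-wide suspend/resume without per-driver dev_pm_ops. A minimal kernel-style fragment of the same pattern; my_dev and the callbacks are illustrative and error handling is trimmed:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/suspend.h>

struct my_dev {
	struct notifier_block pm_nb;
};

static int my_pm_notifier(struct notifier_block *nb, unsigned long mode,
			  void *data)
{
	struct my_dev *d = container_of(nb, struct my_dev, pm_nb);

	switch (mode) {
	case PM_SUSPEND_PREPARE:
		pr_debug("quiesce %p before the system sleeps\n", d);
		break;
	case PM_POST_SUSPEND:
		pr_debug("restart %p after resume\n", d);
		break;
	}
	return NOTIFY_DONE;
}

static int my_register(struct my_dev *d)
{
	d->pm_nb.notifier_call = my_pm_notifier;
	return register_pm_notifier(&d->pm_nb);
}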
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 3bab32485273..3a3d8b5c7ad5 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -401,7 +401,7 @@ static int watchdog_set_pretimeout(struct watchdog_device *wdd,
if (watchdog_pretimeout_invalid(wdd, timeout))
return -EINVAL;
- if (wdd->ops->set_pretimeout)
+ if (wdd->ops->set_pretimeout && (wdd->info->options & WDIOF_PRETIMEOUT))
err = wdd->ops->set_pretimeout(wdd, timeout);
else
wdd->pretimeout = timeout;
@@ -1096,6 +1096,8 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
watchdog_stop(wdd);
}
+ watchdog_hrtimer_pretimeout_stop(wdd);
+
mutex_lock(&wd_data->lock);
wd_data->wdd = NULL;
wdd->wd_data = NULL;
@@ -1103,7 +1105,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
hrtimer_cancel(&wd_data->timer);
kthread_cancel_work_sync(&wd_data->work);
- watchdog_hrtimer_pretimeout_stop(wdd);
put_device(&wd_data->dev);
}
@@ -1172,7 +1173,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
- return __watchdog_ping(wdd);
+ if (watchdog_hw_running(wdd) && handle_boot_enabled)
+ return __watchdog_ping(wdd);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
@@ -1227,6 +1231,53 @@ void __exit watchdog_dev_exit(void)
kthread_destroy_worker(watchdog_kworker);
}
+int watchdog_dev_suspend(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ int ret = 0;
+
+ if (!wdd->wd_data)
+ return -ENODEV;
+
+ /* ping for the last time before suspend */
+ mutex_lock(&wd_data->lock);
+ if (watchdog_worker_should_ping(wd_data))
+ ret = __watchdog_ping(wd_data->wdd);
+ mutex_unlock(&wd_data->lock);
+
+ if (ret)
+ return ret;
+
+ /*
+ * make sure the watchdog worker does not kick in while the
+ * watchdog is suspended
+ */
+ hrtimer_cancel(&wd_data->timer);
+ kthread_cancel_work_sync(&wd_data->work);
+
+ return 0;
+}
+
+int watchdog_dev_resume(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ int ret = 0;
+
+ if (!wdd->wd_data)
+ return -ENODEV;
+
+ /*
+ * __watchdog_ping also retriggers the hrtimer and therefore
+ * restores the ping worker if needed.
+ */
+ mutex_lock(&wd_data->lock);
+ if (watchdog_worker_should_ping(wd_data))
+ ret = __watchdog_ping(wd_data->wdd);
+ mutex_unlock(&wd_data->lock);
+
+ return ret;
+}
+
module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 25c053b09605..7b591443833c 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -9,13 +9,26 @@
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/export.h>
+#include <linux/printk.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/features.h>
+/*
+ * Linux kernel expects at least Xen 4.0.
+ *
+ * Assume some features to be available for that reason (depending on guest
+ * mode, of course).
+ */
+#define chk_required_feature(f) { \
+ if (!xen_feature(f)) \
+ panic("Xen: feature %s not available!\n", #f); \
+ }
+
u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
EXPORT_SYMBOL_GPL(xen_features);
@@ -31,4 +44,9 @@ void xen_setup_features(void)
for (j = 0; j < 32; j++)
xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
}
+
+ if (xen_pv_domain()) {
+ chk_required_feature(XENFEAT_mmu_pt_update_preserve_ad);
+ chk_required_feature(XENFEAT_gnttab_map_avail_bits);
+ }
}
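
chk_required_feature() can name the missing feature in its panic message because #f stringifies the macro argument. A userspace sketch of the same macro shape (has_feature() and the feature IDs are stand-ins; do/while(0) is added so the macro is safe in all statement contexts):

#include <stdio.h>
#include <stdlib.h>

static int has_feature(int f) { return f != 42; }

#define chk_required_feature(f) do { \
	if (!has_feature(f)) { \
		fprintf(stderr, "feature %s not available!\n", #f); \
		exit(1); \
	} \
} while (0)

#define FEAT_OK 1
#define FEAT_MISSING 42

int main(void)
{
	chk_required_feature(FEAT_OK);      /* passes silently */
	chk_required_feature(FEAT_MISSING); /* prints "feature FEAT_MISSING ..." */
	return 0;
}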
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a3e7be96527d..1e7f6b1c0c97 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -266,20 +266,13 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
- int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
+ (1 << _GNTMAP_guest_avail0);
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- /*
- * Set the PTE as special to force get_user_pages_fast() fall
- * back to the slow path. If this is not supported as part of
- * the grant map, it will be done afterwards.
- */
- if (xen_feature(XENFEAT_gnttab_map_avail_bits))
- flags |= (1 << _GNTMAP_guest_avail0);
-
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
@@ -288,14 +281,6 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
return 0;
}
-#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
-{
- set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
- return 0;
-}
-#endif
-
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
int i, err = 0;
@@ -1055,23 +1040,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
- } else {
-#ifdef CONFIG_X86
- /*
- * If the PTEs were not made special by the grant map
- * hypercall, do so here.
- *
- * This is racy since the mapping is already visible
- * to userspace but userspace should be well-behaved
- * enough to not touch it until the mmap() call
- * returns.
- */
- if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
- apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- set_grant_ptes_as_special, NULL);
- }
-#endif
}
return 0;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 24d11861ac7d..643fe440c46e 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* in our domain. Therefore _only_ check address within our domain.
*/
if (pfn_valid(PFN_DOWN(paddr)))
- return is_swiotlb_buffer(paddr);
+ return is_swiotlb_buffer(dev, paddr);
return 0;
}
@@ -164,7 +164,7 @@ int __ref xen_swiotlb_init(void)
int rc = -ENOMEM;
char *start;
- if (io_tlb_default_mem != NULL) {
+ if (io_tlb_default_mem.nslabs) {
pr_warn("swiotlb buffer already initialized\n");
return -EEXIST;
}
@@ -374,7 +374,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
- swiotlb_force != SWIOTLB_FORCE)
+ !is_swiotlb_force_bounce(dev))
goto done;
/*
@@ -509,7 +509,7 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
out_unmap:
xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
sg_dma_len(sgl) = 0;
- return 0;
+ return -EIO;
}
static void
@@ -547,7 +547,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
+ return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 61ce0d142eea..0c5e565aa8cf 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -33,8 +33,6 @@
#define pr_fmt(fmt) "xen-pvscsi: " fmt
-#include <stdarg.h>
-
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 0cd728961fce..e8bed1cb76ba 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -542,8 +542,7 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
}
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
- BUG();
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));
*leaked = false;
for (i = 0; i < j; i++) {
@@ -581,8 +580,7 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
gnttab_set_unmap_op(&unmap[i], vaddrs[i],
GNTMAP_host_map, handles[i]);
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
- BUG();
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));
err = GNTST_okay;
for (i = 0; i < nr_handles; i++) {
@@ -778,8 +776,7 @@ static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
unmap[i].handle = node->handles[i];
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
- BUG();
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));
err = GNTST_okay;
leaked = false;